[clang] 66c0560 - [RISCV][Clang] Add some RVV Integer intrinsic functions.
Zakk Chen via cfe-commits
cfe-commits at lists.llvm.org
Tue Apr 6 03:08:11 PDT 2021
Author: Zakk Chen
Date: 2021-04-06T03:07:36-07:00
New Revision: 66c05609e0d5b758295b3c20d3dde4f3c3cda673
URL: https://github.com/llvm/llvm-project/commit/66c05609e0d5b758295b3c20d3dde4f3c3cda673
DIFF: https://github.com/llvm/llvm-project/commit/66c05609e0d5b758295b3c20d3dde4f3c3cda673.diff
LOG: [RISCV][Clang] Add some RVV Integer intrinsic functions.
1. Rename RVVBinBuiltin to RVVOutputOp1Builtin because the name is not
related to the number of operands.
2. Add RVV Integer instructions which use RVVOutputOp1Builtin (usage
sketched below).
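For illustration only (not part of this commit): a minimal C sketch of how
the overloaded builtins generated from these TableGen classes are invoked,
following the pattern the added tests exercise. The helper function name is
hypothetical.

#include <riscv_vector.h>

// Overload resolution picks the vand variant from the operand types, so
// the same spelling covers both vector-vector and vector-scalar forms.
vint32m1_t mask_lanes(vint32m1_t v, int32_t m, size_t vl) {
  vint32m1_t t = vand(v, m, vl);  // resolves to vand_vx_i32m1
  return vand(t, t, vl);          // resolves to vand_vv_i32m1
}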
Reviewed By: craig.topper
Authored-by: Roger Ferrer Ibanez <rofirrim at gmail.com>
Co-Authored-by: Zakk Chen <zakk.chen at sifive.com>
Differential Revision: https://reviews.llvm.org/D99524
Added:
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c
Modified:
clang/include/clang/Basic/riscv_vector.td
Removed:
################################################################################
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 2932ee1ab0f2..cbacb4a56c8f 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -200,24 +200,58 @@ class RVVBuiltin<string suffix, string prototype, string type_range,
// Basic classes with automatic codegen.
//===----------------------------------------------------------------------===//
-class RVVBinBuiltin<string suffix, string prototype, string type_range>
- : RVVBuiltin<suffix, prototype, type_range> {
- let IntrinsicTypes = [-1, 1];
-}
-
-multiclass RVVBinBuiltinSet<string intrinsic_name, string type_range,
- list<list<string>> suffixes_prototypes> {
- let IRName = intrinsic_name, IRNameMask = intrinsic_name # "_mask" in {
+multiclass RVVBuiltinSet<string intrinsic_name, string type_range,
+ list<list<string>> suffixes_prototypes,
+ list<int> intrinsic_types> {
+ let IRName = intrinsic_name, IRNameMask = intrinsic_name # "_mask",
+ IntrinsicTypes = intrinsic_types in {
foreach s_p = suffixes_prototypes in {
let Name = NAME # "_" # s_p[0] in {
defvar suffix = s_p[1];
defvar prototype = s_p[2];
- def : RVVBinBuiltin<suffix, prototype, type_range>;
+ def : RVVBuiltin<suffix, prototype, type_range>;
}
}
}
}
+// IntrinsicTypes is output, op1 [-1, 1]
+multiclass RVVOutOp1BuiltinSet<string intrinsic_name, string type_range,
+ list<list<string>> suffixes_prototypes> {
+ defm "" : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1, 1]>;
+}
+
+multiclass RVVSignedBinBuiltinSet {
+ defm "" : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vv", "v", "vvv"],
+ ["vx", "v", "vve"]]>;
+}
+
+multiclass RVVUnsignedBinBuiltinSet {
+ defm "" : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vv", "Uv", "UvUvUv"],
+ ["vx", "Uv", "UvUvUe"]]>;
+}
+
+multiclass RVVIntBinBuiltinSet {
+ defm "" : RVVSignedBinBuiltinSet;
+ defm "" : RVVUnsignedBinBuiltinSet;
+}
+
+multiclass RVVIntTerBuiltinSet {
+ defm "" : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vv", "v", "vvvv"],
+ ["vx", "v", "vvev"],
+ ["vv", "Uv", "UvUvUvUv"],
+ ["vx", "Uv", "UvUvUeUv"]]>;
+}
+
+multiclass RVVFloatingBinBuiltinSet {
+ defm "" : RVVOutOp1BuiltinSet<NAME, "fd",
+ [["vv", "v", "vvv"],
+ ["vf", "v", "vve"]]>;
+}
+
defvar TypeList = ["c","s","i","l","f","d"];
defvar EEWList = [["8", "(Log2EEW:3)"],
["16", "(Log2EEW:4)"],
@@ -387,14 +421,85 @@ defm : RVVIndexedLoad<"vloxei">;
// 12. Vector Integer Arithmetic Instructions
// 12.1. Vector Single-Width Integer Add and Subtract
-defm vadd : RVVBinBuiltinSet<"vadd", "csil",
- [["vv", "v", "vvv"],
- ["vx", "v", "vve"],
- ["vv", "Uv", "UvUvUv"],
- ["vx", "Uv", "UvUvUe"]]>;
+defm vadd : RVVIntBinBuiltinSet;
+defm vsub : RVVIntBinBuiltinSet;
+defm vrsub : RVVOutOp1BuiltinSet<"vrsub", "csil",
+ [["vx", "v", "vve"],
+ ["vx", "Uv", "UvUvUe"]]>;
+
+// 12.2. Vector Widening Integer Add/Subtract
+// TODO
+
+// 12.3. Vector Integer Extension
+// TODO
+
+// 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
+// TODO
+
+// 12.5. Vector Bitwise Logical Instructions
+defm vand : RVVIntBinBuiltinSet;
+defm vxor : RVVIntBinBuiltinSet;
+defm vor : RVVIntBinBuiltinSet;
+
+// 12.6. Vector Single-Width Bit Shift Instructions
+defm vsll : RVVOutOp1BuiltinSet<"vsll", "csil",
+ [["vv", "v", "vvUv"],
+ ["vx", "v", "vvz"],
+ ["vv", "Uv", "UvUvUv"],
+ ["vx", "Uv", "UvUvz"]]>;
+defm vsrl : RVVOutOp1BuiltinSet<"vsrl", "csil",
+ [["vv", "Uv", "UvUvUv"],
+ ["vx", "Uv", "UvUvz"]]>;
+defm vsra : RVVOutOp1BuiltinSet<"vsra", "csil",
+ [["vv", "v", "vvUv"],
+ ["vx", "v", "vvz"]]>;
+
+// 12.7. Vector Narrowing Integer Right Shift Instructions
+// TODO
+
+// 12.8. Vector Integer Comparison Instructions
+// TODO
+
+// 12.9. Vector Integer Min/Max Instructions
+defm vminu : RVVUnsignedBinBuiltinSet;
+defm vmin : RVVSignedBinBuiltinSet;
+defm vmaxu : RVVUnsignedBinBuiltinSet;
+defm vmax : RVVSignedBinBuiltinSet;
+
+// 12.10. Vector Single-Width Integer Multiply Instructions
+defm vmul : RVVIntBinBuiltinSet;
+defm vmulh : RVVSignedBinBuiltinSet;
+defm vmulhu : RVVUnsignedBinBuiltinSet;
+defm vmulhsu : RVVOutOp1BuiltinSet<"vmulhsu", "csil",
+ [["vv", "v", "vvUv"],
+ ["vx", "v", "vvUe"]]>;
+
+// 12.11. Vector Integer Divide Instructions
+defm vdivu : RVVUnsignedBinBuiltinSet;
+defm vdiv : RVVSignedBinBuiltinSet;
+defm vremu : RVVUnsignedBinBuiltinSet;
+defm vrem : RVVSignedBinBuiltinSet;
+
+// 12.12. Vector Widening Integer Multiply Instructions
+// TODO
+
+// 12.13. Vector Single-Width Integer Multiply-Add Instructions
+let HasMaskedOffOperand = false in {
+ defm vmacc : RVVIntTerBuiltinSet;
+ defm vnmsac : RVVIntTerBuiltinSet;
+ defm vmadd : RVVIntTerBuiltinSet;
+ defm vnmsub : RVVIntTerBuiltinSet;
+}
+
+// 12.14. Vector Widening Integer Multiply-Add Instructions
+// TODO
+
+// 12.15. Vector Integer Merge Instructions
+// TODO
+
+// 12.16. Vector Integer Move Instructions
+// TODO
// 14. Vector Floating-Point Instructions
// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
-defm vfadd : RVVBinBuiltinSet<"vfadd", "fd",
- [["vv", "v", "vvv"],
- ["vf", "v", "vve"]]>;
+defm vfadd : RVVFloatingBinBuiltinSet;
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
new file mode 100644
index 000000000000..1a3d0a060c01
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
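+// Note: the _m tests below exercise the masked overloads, which take a
+// leading mask and a maskedoff vector, i.e. vand(mask, maskedoff, op1, op2,
+// vl), and lower to the llvm.riscv.vand.mask.* intrinsics. Elements whose
+// mask bit is clear are taken from maskedoff. The vboolN_t mask type is the
+// one whose N equals SEW/LMUL for the operand type (e.g. vbool64_t for
+// vint8mf8_t, where 8 / (1/8) = 64).
+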
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
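+// Note: the masked unsigned tests below lower to the same signless LLVM IR
+// intrinsics as the masked signed tests above (compare test_vand_vv_u8mf8_m
+// with test_vand_vv_i8mf8_m); only the C-level types differ, since vand is a
+// purely bitwise operation.
+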
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
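
All of the masked tests above follow one pattern: the overloaded vand is
called as vand(mask, maskedoff, op1, op2, vl), and inactive lanes of the
result take their value from maskedoff. As a minimal usage sketch (not part
of this patch; it assumes the vsetvl, vle8/vse8, and vmsltu intrinsics from
riscv_vector.h, which this change does not add), a strip-mined loop that ANDs
two byte arrays only where the first operand is below 128 might look like:

  #include <stddef.h>
  #include <stdint.h>
  #include <riscv_vector.h>

  // Hypothetical example, not from the patch:
  //   c[i] = (a[i] < 128) ? (a[i] & b[i]) : a[i]
  void and_where_small(uint8_t *c, const uint8_t *a, const uint8_t *b,
                       size_t n) {
    for (size_t i = 0; i < n;) {
      size_t vl = vsetvl_e8m1(n - i);              // assumed helper
      vuint8m1_t va = vle8_v_u8m1(a + i, vl);      // assumed load intrinsic
      vuint8m1_t vb = vle8_v_u8m1(b + i, vl);
      vbool8_t m = vmsltu_vx_u8m1_b8(va, 128, vl); // assumed compare intrinsic
      // Masked overloaded form exercised by the tests above; lanes where m
      // is false keep the value of the maskedoff operand (va here).
      vuint8m1_t vc = vand(m, va, va, vb, vl);
      vse8_v_u8m1(c + i, vc, vl);                  // assumed store intrinsic
      i += vl;
    }
  }
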
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
new file mode 100644
index 000000000000..3a412c0257c4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
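+
+// Note: every test below calls the overloaded spelling (vdiv, with no type
+// suffix in the name); overload resolution selects the variant from the
+// operand types, and the CHECK lines verify lowering to the matching
+// @llvm.riscv.vdiv.* intrinsic, whose trailing VL operand is i32 on riscv32
+// and i64 on riscv64.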
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
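+// vdivu is the unsigned counterpart of vdiv; its tests follow the same
+// vv/vx pattern across every SEW/LMUL combination. As a minimal caller
+// sketch of the unmasked form (assuming the vsetvl/vle32/vse32 intrinsics
+// from the same RVV intrinsics spec; the function name and the e32/m1
+// choice are illustrative):
+//
+//   void divide_i32(int32_t *out, const int32_t *a, const int32_t *b,
+//                   size_t n) {
+//     for (size_t vl; n > 0; n -= vl, a += vl, b += vl, out += vl) {
+//       vl = vsetvl_e32m1(n);                 // elements this iteration
+//       vint32m1_t va = vle32_v_i32m1(a, vl); // unit-stride loads
+//       vint32m1_t vb = vle32_v_i32m1(b, vl);
+//       vse32_v_i32m1(out, vdiv(va, vb, vl), vl); // overloaded signed divide
+//     }
+//   }
+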
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
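+// The _m tests check the masked overloads: vdiv(mask, maskedoff, op1, op2,
+// vl) lowers to the corresponding llvm.riscv.vdiv.mask.* intrinsic, and
+// elements whose mask bit is clear are taken from maskedoff instead of
+// being computed. A sketch of one common use, skipping zero divisors
+// (assuming the overloaded vmsne comparison intrinsic from the same RVV
+// intrinsics spec; safe_div is an illustrative name):
+//
+//   vint8m1_t safe_div(vint8m1_t num, vint8m1_t den, size_t vl) {
+//     vbool8_t nonzero = vmsne(den, (int8_t)0, vl); // lanes with den != 0
+//     return vdiv(nonzero, num, num, den, vl);      // keep num where den == 0
+//   }
+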
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
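+// The mask type in each signature is determined by the SEW/LMUL ratio of the
+// data type: vbool<N>_t pairs with vector types where SEW/LMUL == N. For the
+// i16 tests below, vint16mf4_t (16 / (1/4) = 64) takes a vbool64_t mask,
+// while vint16m8_t (16 / 8 = 2) takes a vbool2_t mask.
+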
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
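+// The vdivu tests below mirror the signed vdiv tests above for the unsigned
+// types: the overloaded call shape is identical, and each call lowers to the
+// matching @llvm.riscv.vdivu.mask.* intrinsic, with the maskedoff merge
+// operand first and the mask and vl operands last (vl is i32 on RV32 and
+// i64 on RV64). A minimal sketch of the unsigned masked vector-scalar call:
+//
+//   vuint8m1_t q = vdivu(mask, maskedoff, op1, op2, vl); // op2 is uint8_t
+//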
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c
new file mode 100644
index 000000000000..d8c9549ecf1c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmacc_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmacc_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmacc_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmacc_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmacc_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmacc_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmacc_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmacc_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmacc_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmacc_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmacc_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmacc_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmacc_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmacc_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmacc_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmacc_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmacc_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmacc_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmacc_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmacc_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmacc_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmacc_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmacc_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmacc_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmacc_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmacc_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmacc_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmacc_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmacc_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmacc_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmacc_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmacc_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmacc_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmacc_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmacc_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmacc_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmacc_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmacc_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmacc_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmacc_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmacc_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmacc_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmacc_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmacc_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmacc_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmacc_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmacc_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmacc_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmacc_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmacc_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmacc_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmacc_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmacc_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmacc_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmacc_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmacc_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmacc_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmacc_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmacc_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmacc_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmacc_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmacc_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmacc_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmacc_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmacc_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmacc_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmacc_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmacc_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmacc_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmacc_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmacc_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmacc_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmacc_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmacc_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmacc_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmacc_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmacc_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmacc_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmacc_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmacc_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmacc_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmacc_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmacc_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmacc_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmacc_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmacc_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmacc_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmacc_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmacc_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c
new file mode 100644
index 000000000000..aea689906706
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
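+
+// For orientation (illustrative, per the RVV spec): vmadd overwrites the
+// multiplicand, vd[i] = (vd[i] * vs1[i]) + vs2[i], whereas vmacc overwrites
+// the addend, vd[i] = (vs1[i] * vs2[i]) + vd[i]. The overloaded form resolves
+// on the operand types, so a minimal use looks like:
+//   vint8mf8_t r = vmadd(acc, op1, op2, vl); // r = acc * op1 + op2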
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmadd_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmadd_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmadd_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmadd_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmadd_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmadd_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmadd_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmadd_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmadd_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmadd_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmadd_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmadd_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmadd_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmadd_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmadd_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmadd_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmadd_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmadd_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmadd_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmadd_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmadd_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmadd_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmadd_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmadd_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmadd_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmadd_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmadd_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmadd_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmadd_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmadd_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmadd_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmadd_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmadd_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmadd_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmadd_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmadd_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmadd_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmadd_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmadd_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmadd_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmadd_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmadd_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmadd_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmadd_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
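+// The unsigned variants below reuse the llvm.riscv.vmadd.* intrinsics checked
+// above for the signed types: the low bits of a multiply-add do not depend on
+// signedness, so one intrinsic per element type covers both overload families.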
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmadd_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmadd_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmadd_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmadd_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmadd_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmadd_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmadd_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmadd_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmadd_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmadd_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmadd_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmadd_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmadd_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmadd_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmadd_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmadd_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmadd_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmadd_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmadd_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmadd_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmadd_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmadd_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmadd_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmadd_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmadd_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmadd_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmadd_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmadd_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmadd_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmadd_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmadd_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmadd_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmadd_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmadd_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmadd_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmadd_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmadd_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmadd_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmadd_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmadd_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmadd_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmadd_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmadd_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmadd_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
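+// Rough scalar model of the unmasked vmadd tested above; a sketch only,
+// assuming the destructive multiply-add form vd[i] = (op1[i] * vd[i]) + op2[i],
+// with the .vx forms splatting the scalar op1 across all elements:
+//
+//   for (size_t i = 0; i < vl; ++i)
+//     acc[i] = op1[i] * acc[i] + op2[i];   // .vx: replace op1[i] with op1
+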
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
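+// Hedged sketch of the masked (_m) variants in these tests, assuming
+// inactive elements (mask bit clear) simply keep the value already in acc
+// (mask-undisturbed policy); mask[i] stands for the i-th bit of the vbool
+// operand:
+//
+//   for (size_t i = 0; i < vl; ++i)
+//     if (mask[i])
+//       acc[i] = op1[i] * acc[i] + op2[i];
+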
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
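
For reference, a minimal standalone sketch (not part of the patch; the function name madd_masked is illustrative) of how the masked overloaded form exercised above is called: the mask comes first, then the accumulator, the two operands, and the explicit vector length, exactly as in the tests. Per the RVV specification, vmadd computes acc = (acc * op1) + op2 element-wise, with the mask selecting which elements are updated.

#include <riscv_vector.h>

// Minimal usage sketch, assuming a toolchain built with this patch:
// overload resolution selects the masked vmadd variant from the vbool
// argument. Computes acc[i] = (acc[i] * op1[i]) + op2[i] for the active
// elements chosen by mask, over the first vl elements.
vuint32m1_t madd_masked(vbool32_t mask, vuint32m1_t acc,
                        vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
  return vmadd(mask, acc, op1, op2, vl);
}
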
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
new file mode 100644
index 000000000000..388fa93a85bc
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmax_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmax_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmax_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmax_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmax_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmax_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmax_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmax_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmax_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmax_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmax_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmax_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmax_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmax_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmax_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmax_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmax_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmax_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmax_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmax_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmax_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmax_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmax_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmax_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmax_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmax_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmax_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmax_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmax_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmax_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmax_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmax_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
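[Editorial note, not part of the patch.] Every masked test above has the same shape: the overloaded vmax(mask, maskedoff, op1, op2, vl) resolves from its operand types to the type-specific builtin (vmax_vv_* when op2 is a vector, vmax_vx_* when it is a scalar), and elements where the mask is false take their value from maskedoff. The CHECK lines confirm the lowering: the scalar operand type appears in the intrinsic suffix (i8/i16/i32/i64), and the only RV32/RV64 difference is the XLEN type of vl. A minimal usage sketch, assuming only the intrinsics exercised by these tests:

#include <riscv_vector.h>

/* Sketch, not from the patch: raise each active element of `v` to at
 * least `floor`; elements where `mask` is false keep `maskedoff`. */
vint32m1_t clamp_min_masked(vbool32_t mask, vint32m1_t maskedoff,
                            vint32m1_t v, int32_t floor, size_t vl) {
  /* The overloaded name resolves to vmax_vx_i32m1_m from the operands. */
  return vmax(mask, maskedoff, v, floor, vl);
}
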
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
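[Editorial note, not part of the patch.] The vmaxu tests mirror the signed vmax ones with unsigned element types; only the intrinsic base name changes. As a hedged sketch of how the masked overload might be used in a strip-mined loop — assuming the vsetvl, unit-stride load/store, and integer-compare intrinsics defined by the same v0.10 intrinsic specification, which land in sibling patches rather than this one:

#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>

/* Hypothetical helper, not from the patch: wherever pred[i] != 0, raise
 * dst[i] to at least `lo`; passing the original vector as maskedoff
 * leaves inactive elements untouched. */
void bytes_max_masked(uint8_t *dst, const uint8_t *pred, uint8_t lo,
                      size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e8m1(n - i);           /* elements this trip      */
    vuint8m1_t v = vle8_v_u8m1(dst + i, vl);
    vuint8m1_t p = vle8_v_u8m1(pred + i, vl);
    vbool8_t m = vmsne(p, 0, vl);             /* active where pred != 0  */
    v = vmaxu(m, v, v, lo, vl);               /* vmaxu_vx_u8m1_m         */
    vse8(dst + i, v, vl);
    i += vl;
  }
}
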
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmaxu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
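
For readers skimming these autogenerated checks, a minimal usage sketch may help; it is not part of the patch, and it assumes a Clang built with the experimental V extension (the same -target-feature +experimental-v used by the RUN lines). It shows how the overloaded vmaxu() resolves on argument types: the masked form takes (mask, maskedoff, op1, op2, vl) and lowers to @llvm.riscv.vmaxu.mask.*, exactly as the CHECK lines above expect. The function name clamp_floor_masked is illustrative only.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Illustrative sketch (hypothetical helper, not from this commit):
// per-lane unsigned max against a scalar floor, under a mask.
// Inactive lanes take their value from maskedoff. The scalar op2
// selects the _vx flavor; a vector op2 would select _vv instead,
// mirroring the test_vmaxu_vx_*/test_vmaxu_vv_* pairs above.
vuint32m1_t clamp_floor_masked(vbool32_t mask, vuint32m1_t maskedoff,
                               vuint32m1_t op1, uint32_t floor, size_t vl) {
  return vmaxu(mask, maskedoff, op1, floor, vl);
}
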
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
new file mode 100644
index 000000000000..6dd3e7ad4e67
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
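(Aside, not part of the patch: the tests above exercise the unmasked forms. The `_vv_` variants take two vector operands and lower to `@llvm.riscv.vminu.<vec>.<vec>.<xlen>`, while the `_vx_` variants take a scalar second operand and lower to `@llvm.riscv.vminu.<vec>.<elt>.<xlen>`; the trailing type is always the XLEN-sized `vl` argument, i32 on RV32 and i64 on RV64. The masked tests that follow prepend a mask and a maskedoff operand and select the `.mask` flavor of the same intrinsic. As a minimal usage sketch, assuming a toolchain that ships `riscv_vector.h` with these overloaded names enabled, clamping a buffer of unsigned bytes against a scalar ceiling in a strip-mined loop might look like the following; the function names and loop shape are illustrative only:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

void clamp_u8(uint8_t *dst, const uint8_t *src, uint8_t ceil, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e8m8(n - i);           // elements handled this pass
    vuint8m8_t v = vle8_v_u8m8(src + i, vl);  // unit-stride load
    v = vminu(v, ceil, vl);                   // scalar op2 -> vminu.vx form
    vse8_v_u8m8(dst + i, v, vl);              // store the clamped elements
    i += vl;
  }
}

// Masked form: lanes where m is 0 keep the maskedoff value, mirroring the
// vmin(mask, maskedoff, op1, op2, vl) calls checked in the tests below.
vint8m1_t clamp_masked(vbool8_t m, vint8m1_t maskedoff, vint8m1_t op1,
                       int8_t op2, size_t vl) {
  return vmin(m, maskedoff, op1, op2, vl);    // masked vmin.vx via overload
}

Because the calls use the overloaded spellings (`vminu`, `vmin`) rather than the fully-suffixed names, the element width, LMUL, and masking are all inferred from the argument types, which is exactly what these overloaded tests verify.)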
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmin_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmin_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmin_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmin_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmin_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmin_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmin_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmin_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmin_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmin_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmin_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmin_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmin_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmin_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmin_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmin_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmin_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmin_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmin_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmin_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmin_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmin_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmin_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmin_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmin_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmin_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmin_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmin_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmin_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmin_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmin_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmin_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
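+// vminu is the unsigned counterpart of vmin: the same vv/vx shapes and the
+// same masked calling convention (mask, maskedoff, op1, op2, vl), but it
+// lowers to the @llvm.riscv.vminu.* intrinsics and compares element bits as
+// unsigned. Illustrative call shape (assumed, not an autogenerated check):
+//   vuint8mf8_t r = vminu(mask, maskedoff, u1, u2, vl);
+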
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vminu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c
new file mode 100644
index 000000000000..0a370c7a2b89
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c
@@ -0,0 +1,6168 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
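+// The overloaded API resolves vmul() by argument types: a vector second
+// operand selects the vv form (@llvm.riscv.vmul.<vty>.<vty>.*) and a scalar
+// second operand selects the vx form (@llvm.riscv.vmul.<vty>.<elt>.*), with
+// the trailing suffix fixed by the target's XLEN (i32 on riscv32, i64 on
+// riscv64). A minimal sketch of both forms for one element type
+// (illustrative, not a generated check):
+//   vint8mf8_t a = vmul(v1, v2, vl);         // vv form
+//   vint8mf8_t b = vmul(v1, (int8_t)2, vl);  // vx form
+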
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
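+// Note (editorial comment): the unsigned tests below lower to the same
+// llvm.riscv.vmul intrinsic as the signed ones, since vmul returns the low
+// half of the product, which is identical for signed and unsigned operands.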
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
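+// Note (editorial comment): unlike vmul, which returns the low element-width
+// bits of the product, vmulh returns the high bits of the signed x signed
+// product, so it maps to the distinct llvm.riscv.vmulh intrinsic below.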
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
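// NOTE (illustrative annotation, not part of this patch): every vmulh test
// above checks the same lowering -- the intrinsic yields the high SEW bits
// of the 2*SEW-bit signed product. A rough scalar model for SEW=8, using a
// hypothetical helper and assuming an arithmetic right shift on signed
// values:
//
//   static inline int8_t vmulh_ref_i8(int8_t a, int8_t b) {
//     return (int8_t)(((int16_t)a * (int16_t)b) >> 8);
//   }
//
// The vmulhu tests that follow are the unsigned analogue: the high SEW bits
// of the 2*SEW-bit unsigned product.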
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
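+// vmulh: signed high-half multiply, returning the upper SEW bits of
+// op1 * op2. In the masked (_m) variants below, the vboolN_t mask
+// (N = SEW/LMUL) selects the active elements, maskedoff supplies the
+// inactive ones, and vl gives the active vector length.
+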
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
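+// vmulhu: unsigned high-half multiply; same masked-call shape as vmulh
+// above, but over the vuint element types.
+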
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
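+// The masked vmulhu tests above share one element-wise model: where the mask
+// bit is set, the lane gets the upper half of the unsigned product
+// op1[i] * op2[i]; where it is clear, the lane is taken from maskedoff.
+// A minimal scalar sketch of that per-lane semantics follows (illustrative
+// only, not part of the generated tests; the helper name is made up):
+
+static inline uint8_t mulhu_u8_lane(uint8_t a, uint8_t b) {
+  // Widen to 16 bits, multiply, and keep the high 8 bits of the product.
+  return (uint8_t)(((uint16_t)a * (uint16_t)b) >> 8);
+}
+// Per lane, the masked intrinsic then behaves like:
+//   result[i] = mask[i] ? mulhu_u8_lane(op1[i], op2[i]) : maskedoff[i];
+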
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
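+// vmulhsu differs from vmulhu only in signedness: op1 is signed, op2 is
+// unsigned, and the lane holds the high half of that mixed-sign product,
+// which is why the vv form above pairs vint8mf8_t op1 with vuint8mf8_t op2.
+// A scalar sketch under the same caveats as above (assumes the usual
+// arithmetic right shift on signed values):
+
+static inline int8_t mulhsu_i8_lane(int8_t a, uint8_t b) {
+  // Signed-by-unsigned 8x8 -> 16-bit product; keep the high 8 bits.
+  return (int8_t)(((int16_t)a * (uint16_t)b) >> 8);
+}
+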
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
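
The vmulhsu tests above exercise the high-half signed-times-unsigned multiply, and the vnmsac tests that follow exercise the negated multiply-sub accumulator form vd[i] = -(op1[i] * op2[i]) + vd[i] from the RVV spec. As a rough scalar model of those element-wise semantics (illustrative only; SEW = 32 chosen for the example, helper names hypothetical):

#include <stdint.h>
#include <stdio.h>

// vmulhsu reference: high 32 bits of signed(op1) * unsigned(op2).
static int32_t ref_mulhsu_e32(int32_t op1, uint32_t op2) {
  // Zero-extending op2 into int64_t keeps its unsigned value; the full
  // product fits in int64_t. The >> on a negative value is
  // implementation-defined in C but arithmetic on all common compilers.
  int64_t prod = (int64_t)op1 * (int64_t)op2;
  return (int32_t)(prod >> 32);
}

// vnmsac reference: vd[i] = -(op1[i] * op2[i]) + vd[i]. Unsigned
// arithmetic makes the multiply wrap modulo 2^32, matching the
// vector instruction, without signed-overflow UB.
static int32_t ref_nmsac_e32(int32_t acc, int32_t op1, int32_t op2) {
  return (int32_t)((uint32_t)acc - (uint32_t)op1 * (uint32_t)op2);
}

int main(void) {
  // signed(-2) * unsigned(2^31) = -2^32; high half is -1.
  printf("%d\n", ref_mulhsu_e32(-2, 0x80000000u));
  // 10 - 3*4 = -2.
  printf("%d\n", ref_nmsac_e32(10, 3, 4));
  return 0;
}

The masked variants (the _m tests) apply the same per-element operation only where the mask bit is set, taking the maskedoff value elsewhere; the tests below cover the unmasked forms first.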
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c
new file mode 100644
index 000000000000..6771870536b4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsac_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsac_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsac_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsac_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsac_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsac_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsac_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsac_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsac_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsac_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsac_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsac_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsac_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsac_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsac_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsac_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsac_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsac_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsac_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsac_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsac_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsac_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsac_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsac_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsac_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsac_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsac_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsac_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsac_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsac_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsac_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsac_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsac_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsac_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsac_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsac_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsac_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsac_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsac_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsac_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsac_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsac_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsac_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsac_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsac_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsac_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsac_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsac_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsac_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsac_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsac_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsac_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsac_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsac_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsac_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsac_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsac_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsac_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsac_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsac_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsac_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsac_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsac_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsac_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsac_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsac_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsac_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsac_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsac_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsac_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsac_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsac_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsac_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsac_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsac_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsac_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsac_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsac_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsac_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsac_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsac_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsac_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsac_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsac_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsac_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsac_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsac_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsac_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
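+// The _m tests below exercise the masked form: a vboolN_t mask is passed as
+// the first argument (N = SEW/LMUL, e.g. vbool64_t for the i8mf8 case), and
+// the overloaded vnmsac() lowers to @llvm.riscv.vnmsac.mask.* with the mask
+// appended after the operands, so masked-off elements simply keep the
+// accumulator's original value.
+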
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsac_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
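
For reference while reading the masked checks above and the vnmsub tests that follow: vnmsac computes vd[i] = -(vs1[i] * vs2[i]) + vd[i] (acc - op1*op2), while vnmsub swaps the multiplicand, vd[i] = -(vd[i] * vs1[i]) + vs2[i] (op2 - acc*op1). In the masked forms tested here the accumulator also serves as the merge value, so inactive elements keep acc. A minimal scalar sketch of the masked vnmsac.vv behavior, assuming that mask-undisturbed policy (the helper name and fixed element type are illustrative, not part of this patch):

#include <stddef.h>
#include <stdint.h>

/* Scalar model of the masked vnmsac.vv tests above: active elements
   (mask[i] != 0) become acc[i] - op1[i] * op2[i]; inactive elements
   keep the accumulator value, matching the merge-into-acc behavior. */
static void ref_vnmsac_vv_i32_m(const uint8_t *mask, int32_t *acc,
                                const int32_t *op1, const int32_t *op2,
                                size_t vl) {
  for (size_t i = 0; i < vl; ++i)
    if (mask[i])
      acc[i] = acc[i] - op1[i] * op2[i];
}
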
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c
new file mode 100644
index 000000000000..f71f384135d0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsub_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsub_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsub_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsub_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsub_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsub_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsub_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsub_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsub_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsub_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsub_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsub_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsub_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsub_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsub_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsub_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsub_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsub_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsub_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsub_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsub_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsub_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsub_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsub_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsub_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsub_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsub_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsub_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsub_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsub_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsub_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsub_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsub_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsub_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsub_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsub_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsub_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsub_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsub_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsub_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsub_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsub_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsub_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsub_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
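+
+// The unsigned overloads below lower to the same @llvm.riscv.vnmsub.*
+// intrinsics as the signed tests above (compare the CHECK lines): the IR
+// intrinsic operates on plain integer vectors, and only C-level overload
+// resolution distinguishes the vint*_t and vuint*_t types.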
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsub_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsub_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsub_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsub_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsub_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsub_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsub_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsub_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsub_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsub_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsub_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsub_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsub_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsub_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsub_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsub_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsub_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsub_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsub_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsub_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsub_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsub_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsub_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsub_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsub_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsub_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsub_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsub_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsub_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsub_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsub_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsub_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsub_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsub_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsub_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsub_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsub_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsub_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsub_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsub_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsub_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsub_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsub_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsub_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c
new file mode 100644
index 000000000000..168a6f9ee88e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
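+
+// The masked vor tests above all share one shape: the _m overload takes
+// (mask, maskedoff, op1, op2, vl) and lowers to llvm.riscv.vor.mask.*,
+// with inactive elements taken from maskedoff. A rough scalar reference
+// model of that behavior (a sketch only, not code from this commit; tail
+// elements at and beyond vl are out of scope here):
+//
+//   #include <stddef.h>
+//   #include <stdint.h>
+//   #include <stdbool.h>
+//
+//   // Per-element model of the masked vor overloads: active elements
+//   // compute op1 | op2, inactive elements keep maskedoff.
+//   static void vor_u32_m_ref(uint32_t *dst, const bool *mask,
+//                             const uint32_t *maskedoff, const uint32_t *op1,
+//                             const uint32_t *op2, size_t vl) {
+//     for (size_t i = 0; i < vl; ++i)
+//       dst[i] = mask[i] ? (op1[i] | op2[i]) : maskedoff[i];
+//   }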
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
new file mode 100644
index 000000000000..ff6e320a7574
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
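+
+// The RUN lines above compile this file for riscv32 and riscv64 and
+// FileCheck the resulting IR under each prefix; the third RUN line guards
+// against warnings when compiling to assembly. Each test below calls the
+// type-generic vrem from <riscv_vector.h>, and overload resolution picks
+// the explicitly named intrinsic exercised by the companion non-overloaded
+// test in clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c. A sketch of the
+// equivalence (illustration only, not part of the generated test):
+//
+//   vint8mf8_t overload_demo(vint8mf8_t va, vint8mf8_t vb, int8_t s,
+//                            size_t vl) {
+//     vint8mf8_t r = vrem(va, vb, vl); // resolves to vrem_vv_i8mf8
+//     return vrem(r, s, vl);           // resolves to vrem_vx_i8mf8
+//   }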
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
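+// The signed vrem tests end here; the remaining tests cover the unsigned
+// counterpart vremu, which differs only in treating the elements as
+// unsigned. A scalar reference model for one element (a sketch, not code
+// from this commit; the zero-divisor and overflow results follow the RVV
+// spec, which defines the cases that plain C % leaves undefined):
+//
+//   #include <stdint.h>
+//
+//   static int32_t vrem_ref_i32(int32_t a, int32_t b) {
+//     if (b == 0) return a;                    // remainder by zero: dividend
+//     if (a == INT32_MIN && b == -1) return 0; // signed overflow case
+//     return a % b;
+//   }
+//   static uint32_t vremu_ref_u32(uint32_t a, uint32_t b) {
+//     return b == 0 ? a : a % b;               // unsigned: only the zero case
+//   }
+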
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
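+// The _m tests below exercise the masked overloads. These take two extra
+// leading arguments, a mask and a maskedoff vector, and lower to the
+// corresponding @llvm.riscv.vrem.mask.* intrinsics: lanes whose mask bit is
+// set compute the remainder, while inactive lanes are taken from maskedoff.
+// The vboolN_t mask type is chosen so that N equals SEW/LMUL; e.g. an i8mf8
+// operation (SEW=8, LMUL=1/8) pairs with vbool64_t. A rough per-lane model
+// (illustrative only):
+//   result[i] = mask[i] ? (op1[i] % op2[i]) : maskedoff[i];
+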
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrem_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrem_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrem_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrem_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrem_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrem_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrem_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrem_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrem_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrem_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrem_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrem_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrem_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrem_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrem_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrem_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrem_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrem_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrem_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrem_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrem_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrem_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrem_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrem_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrem_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrem_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrem_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrem_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrem_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrem_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrem_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrem_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vremu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vremu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vremu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
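
(Aside for readers of the generated checks above: every masked test follows one pattern. The IR intrinsic name encodes, in order, the result vector type, the second operand's type (a vector type for the _vv form, a scalar type for the _vx form), and the XLEN-sized vl type, so @llvm.riscv.vremu.mask.nxv8i8.i8.i64 is the masked vector-scalar form on riscv64. A minimal C sketch of the overloaded API these tests exercise follows; the wrapper name is made up for illustration, while the vremu call and all types match test_vremu_vx_u8m1_m above.)

#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>

// Hypothetical wrapper around the overloaded masked form exercised by
// test_vremu_vx_u8m1_m: computes op1 % op2 in lanes where `mask` is set
// and carries `maskedoff` through in the inactive lanes.
vuint8m1_t remu_scalar_masked(vbool8_t mask, vuint8m1_t maskedoff,
                              vuint8m1_t op1, uint8_t op2, size_t vl) {
  return vremu(mask, maskedoff, op1, op2, vl);
}

(Compiled with the same flags as the RUN lines in these tests, e.g. -target-feature +experimental-v, this should lower to the llvm.riscv.vremu.mask call shown in the checks.)
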
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c
new file mode 100644
index 000000000000..486232ce6ff4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c
@@ -0,0 +1,1240 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
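
A minimal usage sketch of the masked, overloaded vrsub exercised above (hypothetical code, assuming only the signatures these tests show; vrsub.vx computes scalar - element):

#include <riscv_vector.h>

// Hypothetical example, not one of the generated tests: compute
// 100 - x[i] on active lanes; inactive lanes keep maskedoff[i].
vuint32m1_t rsub_from_100(vbool32_t mask, vuint32m1_t maskedoff,
                          vuint32m1_t x, size_t vl) {
  return vrsub(mask, maskedoff, x, (uint32_t)100, vl);
}
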
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c
new file mode 100644
index 000000000000..e3fde37a24d8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i32.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
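+// A note on the check lines above (reader's annotation, not generated
+// content): the IR intrinsic name encodes the overloaded types, e.g. in
+// @llvm.riscv.vsll.nxv1i8.nxv1i8.i32 the suffixes are the result vector
+// type, the shift operand type, and the XLEN-sized vl type (i32 on RV32,
+// i64 on RV64), which is why each test carries both CHECK-RV32 and
+// CHECK-RV64 variants.
+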
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i32.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i32.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i32.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i32.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i32.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i32.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
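+// A minimal overload-resolution sketch (hypothetical, not one of the
+// generated tests): the single vsll name dispatches on the shift operand,
+// selecting the .vv form for a vector shift and the .vx form for a size_t:
+//
+//   vint32m1_t shl_demo(vint32m1_t x, vuint32m1_t amt, size_t vl) {
+//     vint32m1_t t = vsll(x, amt, vl);  // vsll.vv: t[i] = x[i] << amt[i]
+//     return vsll(t, (size_t)3, vl);    // vsll.vx: result[i] = t[i] << 3
+//   }
+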
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i32.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i32.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i32.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i32.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i32.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i32.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i32.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
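(For orientation while reading these generated checks: every unmasked test just forwards to the overloaded vsll, and the IR intrinsic name encodes the result element type plus the shift-operand type, with the vx form taking a size_t shift. A minimal hypothetical caller, not part of this patch and assuming the overloaded intrinsics from <riscv_vector.h>, would look like:

#include <riscv_vector.h>

// Hypothetical sketch, not in the patch: unmasked vx form.
// Shifting every element left by 1 multiplies each lane by 2;
// per the u32m1 checks above this lowers to
// @llvm.riscv.vsll.nxv2i32.i64.i64 on RV64.
vuint32m1_t shift_left_by_one(vuint32m1_t v, size_t vl) {
  return vsll(v, 1, vl);
}
)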
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
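(The tests below move on to the masked "_m" variants: the overloaded vsll selects the masked form from the extra vboolN_t mask and maskedoff operands, and codegen targets the corresponding @llvm.riscv.vsll.mask.* intrinsics with maskedoff passed first. A hypothetical illustration, not part of this patch:

#include <riscv_vector.h>

// Hypothetical sketch, not in the patch: masked vx form.
// Lanes whose mask bit is 0 keep their value from maskedoff;
// active lanes are shifted left by `shift`.
vint32m1_t shift_active_lanes(vbool32_t mask, vint32m1_t maskedoff,
                              vint32m1_t v, size_t shift, size_t vl) {
  return vsll(mask, maskedoff, v, shift, vl);
}
)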
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsll_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsll_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsll_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsll_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsll_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsll_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsll_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsll_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i32.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsll_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsll_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsll_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsll_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsll_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsll_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsll_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsll_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i32.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsll_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsll_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsll_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsll_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsll_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsll_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsll_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsll_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsll_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsll_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsll_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsll_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsll_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsll_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsll_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsll_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsll_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
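(From here the masked checks repeat for the unsigned element types. Note that in the vv form the shift-amount operand is always an unsigned vector (vuintNmK_t) even when op1 is signed, since a shift amount has no sign. A hypothetical vv caller, illustrative only:

#include <riscv_vector.h>

// Hypothetical sketch, not in the patch: masked vv form.
// Each active lane of v is shifted by the corresponding lane of amounts.
vuint8m1_t per_lane_shift(vbool8_t mask, vuint8m1_t maskedoff,
                          vuint8m1_t v, vuint8m1_t amounts, size_t vl) {
  return vsll(mask, maskedoff, v, amounts, vl);
}
)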
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsll_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i32.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsll_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i32.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsll_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsll_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
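
(The masked tests above all follow the same convention: lanes where `mask` is set are computed from `op1` and `shift`, inactive lanes keep the corresponding element of `maskedoff`, and `vl` bounds the number of elements processed. A minimal usage sketch of the overloaded masked `vsll` under that convention — the wrapper name is hypothetical and not part of this patch, but the call mirrors `test_vsll_vv_u32m1_m` above:

    #include <riscv_vector.h>
    #include <stddef.h>

    // Shift the active elements of op1 left by shift; lanes where mask is
    // clear retain the corresponding element of maskedoff, per the masked
    // intrinsic convention exercised in the tests above.
    static vuint32m1_t shl_active_u32m1(vbool32_t mask, vuint32m1_t maskedoff,
                                        vuint32m1_t op1, vuint32m1_t shift,
                                        size_t vl) {
      return vsll(mask, maskedoff, op1, shift, vl);
    }

The same shape applies to every masked test in this file; only the element type, LMUL suffix, and matching mask type vary.)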
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
new file mode 100644
index 000000000000..9b8a93ee26b5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
@@ -0,0 +1,1240 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i32.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i32.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i32.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i32.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i32.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i32.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i32.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i32.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i32.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
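For readers tracing the masked test pattern above: each masked vsra call takes a mask, a maskedoff vector, the source operand, a shift amount, and the vector length. Below is a minimal scalar sketch of the intended per-element semantics; the helper name and loop model are illustrative assumptions only, not part of this patch or of the riscv_vector.h API, and it assumes the compiler implements signed right shift arithmetically (true of typical targets).

#include <stddef.h>
#include <stdint.h>

// Illustrative scalar model of masked vsra.vx semantics for SEW=8
// (hypothetical helper, not part of riscv_vector.h). Active elements
// are shifted right arithmetically by the low log2(SEW) bits of the
// shift amount; inactive elements take the maskedoff value.
static void model_vsra_vx_i8_m(const _Bool *mask, const int8_t *maskedoff,
                               const int8_t *op1, size_t shift, size_t vl,
                               int8_t *out) {
  for (size_t i = 0; i < vl; ++i)
    out[i] = mask[i] ? (int8_t)(op1[i] >> (shift & 7)) : maskedoff[i];
}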
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
new file mode 100644
index 000000000000..46ad59cd5b83
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
@@ -0,0 +1,1240 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
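+
+// Note on this file's structure: vsrl is the logical (zero-filling)
+// right shift, so it is exercised on unsigned element types only,
+// whereas vsra above covers the arithmetic shift on signed types.
+// Each SEW/LMUL combination gets a vector-vector (vv) and a
+// vector-scalar (vx) test, and the overloaded vsrl() call resolves
+// to the matching form from its operand types.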
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i32.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i32.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i32.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i32.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i32.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i32.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i32.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
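+// The masked _m overloads below take two extra leading operands,
+// (mask, maskedoff, op1, shift, vl), and lower to the corresponding
+// @llvm.riscv.vsrl.mask.* intrinsics.
+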
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i32.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i32.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
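
(Aside, not part of the patch: the vsub tests that follow exercise the same overload pattern as the vsrl tests above. Each overloaded intrinsic resolves on its argument types, so a single spelling covers both the vector-vector "vv" and vector-scalar "vx" forms. A minimal usage sketch, which only restates what the tests below check, assuming <riscv_vector.h> and the experimental V extension:

#include <riscv_vector.h>
#include <stddef.h>

// vx form: y[i] = x[i] - c for the first vl elements.
vint32m1_t sub_scalar(vint32m1_t x, int32_t c, size_t vl) {
  return vsub(x, c, vl); // lowers to @llvm.riscv.vsub.nxv2i32.i32.*
}

// vv form: y[i] = a[i] - b[i]; the same overloaded name resolves here.
vint32m1_t sub_vector(vint32m1_t a, vint32m1_t b, size_t vl) {
  return vsub(a, b, vl); // lowers to @llvm.riscv.vsub.nxv2i32.nxv2i32.*
}

The masked vsrl tests above follow the same scheme with a (mask, maskedoff, op1, shift, vl) argument list, selecting the ".mask" intrinsic variants.)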
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
new file mode 100644
index 000000000000..31e047dbe709
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
new file mode 100644
index 000000000000..42ba2e276a8b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vxor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vxor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vxor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vxor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vxor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vxor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vxor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vxor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
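+// For every element width the mask type is vboolN_t with N = SEW/LMUL, so
+// i16mf4 pairs with vbool64_t just as i8mf8 did above.
+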
+// CHECK-RV32-LABEL: @test_vxor_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vxor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vxor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vxor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vxor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vxor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vxor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vxor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vxor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
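+// In the vx forms the scalar operand width follows SEW, so for 32-bit
+// elements on RV32 the scalar and XLEN suffixes coincide
+// (e.g. @llvm.riscv.vxor.mask.nxv1i32.i32.i32).
+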
+// CHECK-RV32-LABEL: @test_vxor_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vxor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vxor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vxor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vxor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vxor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vxor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vxor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vxor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
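+// 64-bit elements: the i64 scalar operand is passed through unchanged even
+// on RV32, where only the trailing vl suffix is i32.
+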
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vxor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vxor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vxor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vxor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vxor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vxor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vxor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vxor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
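+// Unsigned variants: LLVM vector types are signless, so the u8/u16/u32/u64
+// tests below lower to exactly the same llvm.riscv.vxor.mask intrinsics as
+// their signed counterparts; signedness matters only at the C type level.
+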
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vxor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
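
All of the masked tests above exercise the same calling convention: the overloaded intrinsic takes (mask, maskedoff, op1, op2, vl), where elements whose mask bit is clear are taken from maskedoff and only the first vl elements are computed. The _vv forms take two vector operands; the _vx forms splat a scalar for op2. The IR intrinsic name in the CHECK lines encodes the overloaded types: for example @llvm.riscv.vxor.mask.nxv4i16.i16.i32 is the masked vxor whose result is <vscale x 4 x i16>, whose op2 is a scalar i16, and whose vl is i32 (XLEN on riscv32). A minimal usage sketch follows — not part of this patch, and assuming the vsetvl and unit-stride load/store intrinsics (vsetvl_e8m8, vle8_v_u8m8, vse8_v_u8m8) from the same RVV intrinsics spec revision:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// XOR a byte stream with a scalar key, strip-mining n elements
// in chunks of vl. Uses the overloaded vxor added by this patch,
// which resolves here to the vxor_vx_u8m8 (vector-scalar) form.
void xor_with_key(uint8_t *dst, const uint8_t *src, uint8_t key, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e8m8(n - i);           // elements this iteration
    vuint8m8_t v = vle8_v_u8m8(src + i, vl);  // unit-stride load
    v = vxor(v, key, vl);                     // overloaded vxor_vx_u8m8
    vse8_v_u8m8(dst + i, v, vl);              // unit-stride store
    i += vl;
  }
}

A masked variant of the same call would read vxor(mask, maskedoff, v, key, vl), matching the five-operand signature checked throughout the tests above.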
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
new file mode 100644
index 000000000000..33f2836b5cda
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vand_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vand_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vand_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vand_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vand_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vand_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vand_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vand_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vand_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vand_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vand_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vand_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vand_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vand_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vand_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vand_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vand_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vand_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vand_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vand_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vand_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vand_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vand_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vand_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vand_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vand_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vand_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vand_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vand_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vand_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vand_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vand_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vand_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vand_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vand_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vand_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vand_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vand_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vand_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vand_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vand_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vand_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vand_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vand_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vand_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vand_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vand_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vand_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vand_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vand_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vand_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vand_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vand_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vand_vx_u32m8(op1, op2, vl);
+}
+
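+// For 64-bit element types the vx scalar operand stays i64 in the IR even on
+// RV32 (see llvm.riscv.vand.nxv1i64.i64.i32 below): the scalar width follows
+// the element type, and only the trailing vl operand tracks XLEN. A minimal
+// sketch under that assumption (helper name illustrative):
+static inline vuint64m1_t vand_splat64_sketch(vuint64m1_t v, uint64_t s,
+                                              size_t vl) {
+  // On RV32 the uint64_t scalar is still passed as a full 64-bit value.
+  return vand_vx_u64m1(v, s, vl);
+}
+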
+// CHECK-RV32-LABEL: @test_vand_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vand_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vand_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vand_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vand_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vand_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vand_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vand_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vand_vx_u64m8(op1, op2, vl);
+}
+
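+// The _m variants below add two leading operands: a vbool<n>_t mask with
+// n = SEW/LMUL matching the data type (vbool64_t for i8mf8, vbool1_t for
+// i8m8) and a maskedoff vector that supplies the result for inactive
+// elements; they lower to the llvm.riscv.vand.mask.* intrinsics. A minimal
+// sketch, assuming the masked intrinsics tested below (helper name
+// illustrative):
+static inline vint8m1_t vand_masked_sketch(vbool8_t m, vint8m1_t off,
+                                           vint8m1_t a, int8_t b, size_t vl) {
+  // Active elements get a & b; inactive elements take their value from off.
+  return vand_vx_i8m1_m(m, off, a, b, vl);
+}
+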
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vand_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vand_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vand_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vand_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vand_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vand_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vand_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vand_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vand_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vand_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vand_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vand_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vand_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vand_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vand_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vand_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vand_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vand_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vand_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vand_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vand_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vand_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vand_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vand_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vand_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
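+// (Note that even on RV32 the vx intrinsic is declared with a full i64
+// scalar operand, as the nxv1i64.i64.i32 mangling above shows; these
+// tests check only the emitted IR, not how the backend materializes
+// the 64-bit scalar on a 32-bit target.)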
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vand_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vand_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vand_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vand_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vand_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vand_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vand_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vand_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vand_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vand_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vand_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vand_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vand_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vand_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vand_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vand_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vand_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vand_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vand_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vand_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vand_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vand_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vand_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vand_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vand_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vand_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vand_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vand_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vand_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vand_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vand_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vand_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vand_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vand_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vand_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vand_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vand_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
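A note on the masked tests above: every _m variant uses the same
operand order, mask first, then the maskedoff source whose elements are
kept wherever the mask bit is clear, then the operands, then the vector
length. The sketch below is a minimal usage example, not part of the
committed tests; masked_and is an illustrative name, but the intrinsic
call matches the signatures exercised above.

#include <riscv_vector.h>

// Active elements (mask bit set) receive a & b; inactive elements are
// copied from fallback. vl bounds how many elements are processed.
vint32m1_t masked_and(vbool32_t m, vint32m1_t fallback,
                      vint32m1_t a, vint32m1_t b, size_t vl) {
  return vand_vv_i32m1_m(m, fallback, a, b, vl);
}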
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c
new file mode 100644
index 000000000000..3f2138a44fce
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
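+// (The two IR RUN lines above pipe through opt -S -mem2reg so the
+// autogenerated CHECK bodies stay minimal. The third RUN line compiles
+// to assembly under -Werror -Wall and discards the output; together
+// with --allow-empty and the ASM-NOT line it asserts a warning-free
+// compile rather than checking any assembly.)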
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vdiv_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8mf8(op1, op2, vl);
+}
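+// (As the checks above show, the vv form takes two vectors while the
+// vx form takes a vector and an i8 scalar; the instruction splats the
+// scalar across the active elements before the elementwise divide.)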
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vdiv_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vdiv_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vdiv_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vdiv_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vdiv_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vdiv_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vdiv_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vdiv_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vdiv_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vdiv_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vdiv_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vdiv_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vdiv_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vdiv_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vdiv_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vdiv_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vdiv_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vdiv_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vdiv_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vdiv_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vdiv_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vdiv_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vdiv_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vdiv_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vdiv_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vdiv_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vdiv_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vdiv_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vdiv_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vdiv_vx_i64m8(op1, op2, vl);
+}
+
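The vdivu tests below mirror the signed vdiv set above, but on unsigned types (vuintN*_t vector operands, uintN_t scalars) and lowering to the @llvm.riscv.vdivu intrinsics; signed and unsigned division are distinct RVV instructions (vdiv.vv/vdiv.vx versus vdivu.vv/vdivu.vx), so the same bit pattern can divide to different results. A minimal sketch of the distinction, using only intrinsics exercised in this patch:

#include <riscv_vector.h>  // assumed header, as in the sketch above

// The same 8-bit pattern divides differently: (int8_t)0xFE is -2, so the
// signed quotient by 2 is -1, while unsigned 0xFE is 254 and yields 127.
// Illustrative wrappers only; not part of this patch.
vint8m1_t quot_signed(vint8m1_t op1, int8_t op2, size_t vl) {
  return vdiv_vx_i8m1(op1, op2, vl);    // selects vdiv.vx
}
vuint8m1_t quot_unsigned(vuint8m1_t op1, uint8_t op2, size_t vl) {
  return vdivu_vx_u8m1(op1, op2, vl);   // selects vdivu.vx
}
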
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vdivu_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vdivu_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vdivu_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vdivu_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vdivu_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vdivu_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vdivu_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vdivu_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vdivu_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vdivu_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vdivu_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vdivu_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vdivu_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vdivu_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vdivu_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vdivu_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vdivu_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vdivu_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vdivu_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vdivu_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vdivu_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vdivu_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vdivu_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vdivu_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vdivu_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vdivu_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vdivu_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vdivu_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vdivu_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vdivu_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vdivu_vx_u64m8(op1, op2, vl);
+}
+
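The masked `_m` variants checked from here on take the mask first, then a maskedoff vector that supplies the result for inactive lanes, then the two operands and the vl, exactly as the generated calls show. As a minimal sketch (not part of this patch) of how a masked form such as vdiv_vv_i32m1_m could guard against division by zero — assuming the vsetvl, load/store, and vmsne compare intrinsics from the rest of the RVV intrinsics work, which this patch does not add, and with illustrative function names:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Divide a[i] by b[i] only where b[i] != 0; lanes with a zero divisor
// keep the corresponding element of `fallback` via the maskedoff operand.
// Hypothetical helper for illustration only.
void safe_div(int32_t *out, const int32_t *a, const int32_t *b,
              const int32_t *fallback, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m1(n - i);               // elements handled this pass
    vint32m1_t va = vle32_v_i32m1(a + i, vl);
    vint32m1_t vb = vle32_v_i32m1(b + i, vl);
    vint32m1_t voff = vle32_v_i32m1(fallback + i, vl);
    vbool32_t nz = vmsne_vx_i32m1_b32(vb, 0, vl);  // active lanes: b != 0
    // Same operand order as test_vdiv_vv_i32m1_m below:
    // mask, maskedoff, op1, op2, vl.
    vint32m1_t vq = vdiv_vv_i32m1_m(nz, voff, va, vb, vl);
    vse32_v_i32m1(out + i, vq, vl);
    i += vl;
  }
}
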
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vdiv_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vdiv_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vdiv_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vdiv_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vdiv_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vdiv_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vdiv_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vdiv_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vdiv_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vdiv_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vdiv_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vdiv_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vdiv_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vdiv_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vdiv_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vdiv_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vdiv_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vdiv_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vdiv_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vdiv_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vdiv_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vdiv_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vdiv_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vdiv_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vdiv_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vdiv_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vdiv_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vdiv_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vdiv_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vdiv_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vdiv_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
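The unsigned masked vdivu variants follow; they mirror the signed forms above, differing only in the unsigned element types and the vdivu IR intrinsic. A small sketch of the vector-scalar `_vx` flavor, which splats one scalar divisor across all active lanes — again assuming the vsetvl and load/store intrinsics, which are outside this patch, and with an illustrative function name:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Unsigned divide of every element by one scalar; the _vx form avoids
// building a splat vector by hand (divisor must be nonzero).
// Hypothetical helper for illustration only.
void scale_down(uint8_t *buf, uint8_t divisor, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e8m8(n - i);
    vuint8m8_t v = vle8_v_u8m8(buf + i, vl);
    v = vdivu_vx_u8m8(v, divisor, vl);  // same shape as the unmasked tests above
    vse8_v_u8m8(buf + i, v, vl);
    i += vl;
  }
}
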
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vdivu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vdivu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vdivu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vdivu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vdivu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vdivu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vdivu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vdivu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vdivu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vdivu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vdivu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vdivu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vdivu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vdivu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vdivu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vdivu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vdivu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vdivu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vdivu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vdivu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vdivu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vdivu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vdivu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vdivu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vdivu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vdivu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vdivu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vdivu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vdivu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vdivu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vdivu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c
new file mode 100644
index 000000000000..44915ecd355f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmacc_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmacc_vv_i8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmacc_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmacc_vx_i8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmacc_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmacc_vv_i8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmacc_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vmacc_vx_i8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmacc_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmacc_vv_i8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmacc_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vmacc_vx_i8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmacc_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmacc_vv_i8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmacc_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vmacc_vx_i8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmacc_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmacc_vv_i8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmacc_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vmacc_vx_i8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmacc_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmacc_vv_i8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmacc_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vmacc_vx_i8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmacc_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmacc_vv_i8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmacc_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vmacc_vx_i8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmacc_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmacc_vv_i16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmacc_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vmacc_vx_i16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmacc_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmacc_vv_i16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmacc_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vmacc_vx_i16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmacc_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmacc_vv_i16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmacc_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vmacc_vx_i16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmacc_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmacc_vv_i16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmacc_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vmacc_vx_i16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmacc_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmacc_vv_i16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmacc_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vmacc_vx_i16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmacc_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmacc_vv_i16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmacc_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vmacc_vx_i16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmacc_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmacc_vv_i32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmacc_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vmacc_vx_i32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmacc_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmacc_vv_i32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmacc_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vmacc_vx_i32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmacc_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmacc_vv_i32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmacc_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vmacc_vx_i32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmacc_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmacc_vv_i32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmacc_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vmacc_vx_i32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmacc_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmacc_vv_i32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmacc_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vmacc_vx_i32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmacc_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmacc_vv_i64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmacc_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vmacc_vx_i64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmacc_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmacc_vv_i64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmacc_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vmacc_vx_i64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmacc_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmacc_vv_i64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmacc_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vmacc_vx_i64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmacc_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmacc_vv_i64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmacc_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vmacc_vx_i64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmacc_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmacc_vv_u8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmacc_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmacc_vx_u8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmacc_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmacc_vv_u8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmacc_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmacc_vx_u8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmacc_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmacc_vv_u8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmacc_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmacc_vx_u8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmacc_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmacc_vv_u8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmacc_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vmacc_vx_u8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmacc_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmacc_vv_u8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmacc_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vmacc_vx_u8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmacc_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmacc_vv_u8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmacc_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vmacc_vx_u8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmacc_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmacc_vv_u8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmacc_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmacc_vx_u8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmacc_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmacc_vv_u16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmacc_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmacc_vx_u16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmacc_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmacc_vv_u16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmacc_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmacc_vx_u16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmacc_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmacc_vv_u16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmacc_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vmacc_vx_u16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmacc_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmacc_vv_u16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmacc_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vmacc_vx_u16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmacc_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmacc_vv_u16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmacc_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vmacc_vx_u16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmacc_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmacc_vv_u16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmacc_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vmacc_vx_u16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmacc_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmacc_vv_u32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmacc_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmacc_vx_u32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmacc_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmacc_vv_u32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmacc_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vmacc_vx_u32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmacc_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmacc_vv_u32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmacc_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vmacc_vx_u32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmacc_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmacc_vv_u32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmacc_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vmacc_vx_u32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmacc_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmacc_vv_u32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmacc_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vmacc_vx_u32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmacc_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmacc_vv_u64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmacc_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vmacc_vx_u64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmacc_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmacc_vv_u64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmacc_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vmacc_vx_u64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmacc_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmacc_vv_u64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmacc_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vmacc_vx_u64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmacc_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmacc_vv_u64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmacc_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vmacc_vx_u64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmacc_vv_i8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmacc_vx_i8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmacc_vv_i8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vmacc_vx_i8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmacc_vv_i8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vmacc_vx_i8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmacc_vv_i8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vmacc_vx_i8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmacc_vv_i8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vmacc_vx_i8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmacc_vv_i8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vmacc_vx_i8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmacc_vv_i8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vmacc_vx_i8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmacc_vv_i16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vmacc_vx_i16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmacc_vv_i16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vmacc_vx_i16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmacc_vv_i16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vmacc_vx_i16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmacc_vv_i16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vmacc_vx_i16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmacc_vv_i16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vmacc_vx_i16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmacc_vv_i16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vmacc_vx_i16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmacc_vv_i32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vmacc_vx_i32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmacc_vv_i32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vmacc_vx_i32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmacc_vv_i32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vmacc_vx_i32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmacc_vv_i32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vmacc_vx_i32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmacc_vv_i32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vmacc_vx_i32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmacc_vv_i64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vmacc_vx_i64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmacc_vv_i64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vmacc_vx_i64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmacc_vv_i64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vmacc_vx_i64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmacc_vv_i64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vmacc_vx_i64m8_m(mask, acc, op1, op2, vl);
+}
+
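
All of the masked tests in this file share one shape: lanes where the mask is
set compute acc + op1 * op2, the remaining lanes keep acc, and the mask type
follows the SEW/LMUL ratio (vbool<SEW/LMUL>_t, which is why e32m1 and e8mf4
both pair with vbool32_t). As a rough sketch of how a caller might strip-mine
a buffer with these intrinsics (illustrative only, not part of the committed
tests, and assuming the companion vsetvl/load/store/compare intrinsics
vsetvl_e32m1, vle32_v_i32m1, vle8_v_u8mf4, vmsne_vx_u8mf4_b32 and
vse32_v_i32m1 are available under the same v0.10 non-overloaded naming):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// dst[i] += scale * src[i] wherever active[i] != 0; all other lanes of dst
// are left unchanged, because the masked vmacc keeps acc in inactive lanes.
void scaled_accumulate(int32_t *dst, const int32_t *src, const uint8_t *active,
                       int32_t scale, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m1(n - i);                   // lanes this pass
    vint32m1_t acc = vle32_v_i32m1(dst + i, vl);       // running sums
    vint32m1_t op2 = vle32_v_i32m1(src + i, vl);       // values to scale
    vuint8mf4_t flags = vle8_v_u8mf4(active + i, vl);  // per-lane byte flags
    vbool32_t mask = vmsne_vx_u8mf4_b32(flags, 0, vl); // nonzero => active
    acc = vmacc_vx_i32m1_m(mask, acc, scale, op2, vl); // acc += scale * op2
    vse32_v_i32m1(dst + i, acc, vl);
    i += vl;
  }
}

Note that the same vl is reused for the e8mf4 flag load: e32m1 and e8mf4 have
the same VLMAX (VLEN/32 elements), so one vsetvl covers both.
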
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmacc_vv_u8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmacc_vx_u8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmacc_vv_u8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmacc_vx_u8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmacc_vv_u8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmacc_vx_u8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmacc_vv_u8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vmacc_vx_u8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmacc_vv_u8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vmacc_vx_u8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmacc_vv_u8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vmacc_vx_u8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmacc_vv_u8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmacc_vx_u8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmacc_vv_u16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmacc_vx_u16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmacc_vv_u16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmacc_vx_u16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmacc_vv_u16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vmacc_vx_u16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmacc_vv_u16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vmacc_vx_u16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmacc_vv_u16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vmacc_vx_u16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmacc_vv_u16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vmacc_vx_u16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmacc_vv_u32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmacc_vx_u32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmacc_vv_u32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vmacc_vx_u32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmacc_vv_u32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vmacc_vx_u32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmacc_vv_u32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vmacc_vx_u32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmacc_vv_u32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vmacc_vx_u32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmacc_vv_u64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vmacc_vx_u64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmacc_vv_u64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vmacc_vx_u64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmacc_vv_u64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vmacc_vx_u64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmacc_vv_u64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmacc_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vmacc_vx_u64m8_m(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c
new file mode 100644
index 000000000000..8980828fb045
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
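+// (The two IR RUN lines above are checked, after mem2reg, against the
+// CHECK-RV32 and CHECK-RV64 prefixes; the last RUN line compiles to assembly
+// under -Wall -Werror, and ASM-NOT asserts that no warning is emitted.)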
+#include <riscv_vector.h>
+
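+// Per the RVV spec, vmadd is the overwrite-multiplicand form of integer
+// multiply-add: vmadd(acc, op1, op2, vl) computes acc = (acc * op1) + op2,
+// while vmacc (tested in vmacc.c) computes acc = (op1 * op2) + acc. A scalar
+// sketch of the element-wise operation exercised here:
+//   for (size_t i = 0; i < vl; ++i) acc[i] = acc[i] * op1[i] + op2[i];
+// The _vx forms splat the scalar op1 across all elements first.
+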
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmadd_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmadd_vv_i8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmadd_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmadd_vx_i8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmadd_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmadd_vv_i8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmadd_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vmadd_vx_i8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmadd_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmadd_vv_i8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmadd_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vmadd_vx_i8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmadd_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmadd_vv_i8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmadd_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vmadd_vx_i8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmadd_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmadd_vv_i8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmadd_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vmadd_vx_i8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmadd_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmadd_vv_i8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmadd_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vmadd_vx_i8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmadd_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmadd_vv_i8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmadd_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vmadd_vx_i8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmadd_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmadd_vv_i16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmadd_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vmadd_vx_i16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmadd_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmadd_vv_i16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmadd_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vmadd_vx_i16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmadd_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmadd_vv_i16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmadd_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vmadd_vx_i16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmadd_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmadd_vv_i16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmadd_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vmadd_vx_i16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmadd_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmadd_vv_i16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmadd_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vmadd_vx_i16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmadd_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmadd_vv_i16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmadd_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vmadd_vx_i16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmadd_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmadd_vv_i32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmadd_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vmadd_vx_i32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmadd_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmadd_vv_i32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmadd_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vmadd_vx_i32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmadd_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmadd_vv_i32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmadd_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vmadd_vx_i32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmadd_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmadd_vv_i32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmadd_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vmadd_vx_i32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmadd_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmadd_vv_i32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmadd_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vmadd_vx_i32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmadd_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmadd_vv_i64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmadd_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vmadd_vx_i64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmadd_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmadd_vv_i64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmadd_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vmadd_vx_i64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmadd_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmadd_vv_i64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmadd_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vmadd_vx_i64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmadd_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmadd_vv_i64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmadd_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vmadd_vx_i64m8(acc, op1, op2, vl);
+}
+
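+// The unsigned tests that follow mirror the signed ones above; as the CHECK
+// lines show, both lower to the same @llvm.riscv.vmadd.* intrinsics, since
+// integer multiply-add is sign-agnostic.
+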
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmadd_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmadd_vv_u8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmadd_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmadd_vx_u8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmadd_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmadd_vv_u8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmadd_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmadd_vx_u8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmadd_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmadd_vv_u8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmadd_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmadd_vx_u8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmadd_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmadd_vv_u8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmadd_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vmadd_vx_u8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmadd_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmadd_vv_u8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmadd_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vmadd_vx_u8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmadd_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmadd_vv_u8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmadd_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vmadd_vx_u8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmadd_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmadd_vv_u8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmadd_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmadd_vx_u8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmadd_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmadd_vv_u16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmadd_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmadd_vx_u16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmadd_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmadd_vv_u16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmadd_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmadd_vx_u16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmadd_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmadd_vv_u16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmadd_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vmadd_vx_u16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmadd_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmadd_vv_u16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmadd_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vmadd_vx_u16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmadd_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmadd_vv_u16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmadd_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vmadd_vx_u16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmadd_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmadd_vv_u16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmadd_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vmadd_vx_u16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmadd_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmadd_vv_u32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmadd_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmadd_vx_u32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmadd_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmadd_vv_u32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmadd_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vmadd_vx_u32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmadd_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmadd_vv_u32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmadd_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vmadd_vx_u32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmadd_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmadd_vv_u32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmadd_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vmadd_vx_u32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmadd_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmadd_vv_u32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmadd_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vmadd_vx_u32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmadd_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmadd_vv_u64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmadd_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vmadd_vx_u64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmadd_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmadd_vv_u64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmadd_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vmadd_vx_u64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmadd_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmadd_vv_u64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmadd_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vmadd_vx_u64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmadd_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmadd_vv_u64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmadd_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vmadd_vx_u64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmadd_vv_i8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmadd_vx_i8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmadd_vv_i8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vmadd_vx_i8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmadd_vv_i8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vmadd_vx_i8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmadd_vv_i8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vmadd_vx_i8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmadd_vv_i8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vmadd_vx_i8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmadd_vv_i8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vmadd_vx_i8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmadd_vv_i8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vmadd_vx_i8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmadd_vv_i16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vmadd_vx_i16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmadd_vv_i16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vmadd_vx_i16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmadd_vv_i16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vmadd_vx_i16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmadd_vv_i16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vmadd_vx_i16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmadd_vv_i16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vmadd_vx_i16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmadd_vv_i16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vmadd_vx_i16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmadd_vv_i32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vmadd_vx_i32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmadd_vv_i32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vmadd_vx_i32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmadd_vv_i32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vmadd_vx_i32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmadd_vv_i32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vmadd_vx_i32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmadd_vv_i32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vmadd_vx_i32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmadd_vv_i64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vmadd_vx_i64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmadd_vv_i64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vmadd_vx_i64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmadd_vv_i64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vmadd_vx_i64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmadd_vv_i64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vmadd_vx_i64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmadd_vv_u8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmadd_vx_u8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmadd_vv_u8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmadd_vx_u8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmadd_vv_u8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmadd_vx_u8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmadd_vv_u8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vmadd_vx_u8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmadd_vv_u8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vmadd_vx_u8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmadd_vv_u8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vmadd_vx_u8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmadd_vv_u8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmadd_vx_u8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmadd_vv_u16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmadd_vx_u16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmadd_vv_u16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmadd_vx_u16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmadd_vv_u16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vmadd_vx_u16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmadd_vv_u16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vmadd_vx_u16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmadd_vv_u16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vmadd_vx_u16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmadd_vv_u16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vmadd_vx_u16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmadd_vv_u32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmadd_vx_u32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmadd_vv_u32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vmadd_vx_u32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmadd_vv_u32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vmadd_vx_u32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmadd_vv_u32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vmadd_vx_u32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmadd_vv_u32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vmadd_vx_u32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmadd_vv_u64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vmadd_vx_u64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmadd_vv_u64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vmadd_vx_u64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmadd_vv_u64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vmadd_vx_u64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmadd_vv_u64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vmadd_vx_u64m8_m(mask, acc, op1, op2, vl);
+}
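
For reference, a minimal usage sketch (not part of the patch) of one of the masked intrinsics exercised above. The wrapper name scaled_accumulate is hypothetical; it assumes the standard vmadd semantics vd = (vd * rs1) + vs2, with masked-off lanes assumed to keep their previous destination value:

#include <riscv_vector.h>

// Hypothetical wrapper: acc[i] = acc[i] * scale + op2[i] for active lanes;
// lanes where mask[i] is clear are assumed to keep their old acc value.
vint64m1_t scaled_accumulate(vbool64_t mask, vint64m1_t acc, int64_t scale,
                             vint64m1_t op2, size_t vl) {
  return vmadd_vx_i64m1_m(mask, acc, scale, op2, vl);
}
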
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c
new file mode 100644
index 000000000000..11332ec8b118
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c
@@ -0,0 +1,2473 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmax_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmax_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmax_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmax_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmax_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmax_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmax_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmax_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmax_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmax_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmax_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmax_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmax_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmax_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmax_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmax_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmax_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmax_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmax_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmax_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmax_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmax_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmax_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmax_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmax_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmax_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmax_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmax_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmax_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmax_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmax_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmaxu_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmaxu_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmaxu_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmaxu_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmaxu_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmaxu_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmaxu_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmaxu_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmaxu_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmaxu_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmaxu_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmaxu_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmaxu_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmaxu_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmaxu_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmaxu_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmaxu_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmaxu_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmaxu_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmaxu_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmaxu_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmaxu_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmaxu_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmaxu_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmaxu_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmaxu_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmaxu_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmaxu_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmaxu_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmaxu_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmaxu_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmax_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmax_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmax_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmax_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmax_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmax_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmax_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmax_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmax_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmax_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmax_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmax_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmax_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmax_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmax_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmax_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmax_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmax_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmax_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmax_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmax_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmax_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmax_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmax_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmax_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmax_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmax_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmax_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmax_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmax_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmax_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmax_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmax_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmax_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmax_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmax_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmax_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmax_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmax_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmax_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmax_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmax_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmax_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmax_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmax_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmax_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmax_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmax_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmax_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmax_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmax_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmax_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmax_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmax_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmax_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmax_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmax_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmax_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmax_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmax_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmax_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmax_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmax_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmaxu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmaxu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmaxu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmaxu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmaxu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmaxu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmaxu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmaxu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmaxu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmaxu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmaxu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmaxu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmaxu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmaxu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmaxu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmaxu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmaxu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmaxu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmaxu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmaxu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmaxu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmaxu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmaxu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmaxu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmaxu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmaxu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmaxu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmaxu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmaxu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmaxu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmaxu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmaxu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
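+
+// A minimal illustrative wrapper (hypothetical, not produced by the
+// generator): the _m variants above take the mask first and use maskedoff
+// to supply the result for inactive lanes, so a caller that preserves `old`
+// wherever the mask is clear could look like:
+//
+//   vuint64m8_t masked_max(vbool8_t m, vuint64m8_t old,
+//                          vuint64m8_t a, vuint64m8_t b, size_t vl) {
+//     return vmaxu_vv_u64m8_m(m, old, a, b, vl);
+//   }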
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c
new file mode 100644
index 000000000000..7af09cd98de6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
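+
+// The tests below cover every (form, element type, LMUL) instance of the
+// signed minimum intrinsic: vmin_vv_* takes two vectors, while vmin_vx_*
+// takes a vector and a scalar that is splat across the active elements.
+// The trailing .i32/.i64 suffix on the IR intrinsic name in the CHECK lines
+// is the XLEN type of the vl operand, which is why RV32 and RV64 need
+// separate check prefixes. A minimal usage sketch (hypothetical helper):
+//
+//   vint32m1_t cap_above(vint32m1_t v, int32_t bound, size_t vl) {
+//     return vmin_vx_i32m1(v, bound, vl); // elementwise min(v[i], bound)
+//   }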
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmin_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmin_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmin_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmin_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmin_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmin_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmin_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmin_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmin_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmin_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmin_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmin_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmin_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmin_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmin_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmin_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmin_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmin_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmin_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmin_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmin_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmin_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmin_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmin_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmin_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmin_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmin_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmin_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmin_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmin_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmin_vx_i64m8(op1, op2, vl);
+}
+
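+// vminu is the unsigned counterpart of vmin above: the same vv/vx forms and
+// type coverage, but over u-typed elements with an unsigned comparison. A
+// minimal sketch (hypothetical helper name):
+//
+//   vuint8m1_t cap_u8(vuint8m1_t v, uint8_t bound, size_t vl) {
+//     return vminu_vx_u8m1(v, bound, vl); // elementwise unsigned min
+//   }
+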
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vminu_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vminu_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vminu_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vminu_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vminu_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vminu_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vminu_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vminu_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vminu_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vminu_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vminu_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vminu_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vminu_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vminu_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vminu_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vminu_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vminu_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vminu_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vminu_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vminu_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vminu_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vminu_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vminu_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vminu_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vminu_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vminu_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vminu_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vminu_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vminu_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vminu_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vminu_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmin_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmin_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmin_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmin_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmin_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmin_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmin_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmin_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmin_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmin_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmin_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmin_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmin_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmin_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmin_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmin_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmin_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmin_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmin_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmin_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmin_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmin_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmin_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmin_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmin_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmin_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmin_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmin_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmin_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmin_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmin_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmin_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmin_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmin_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmin_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmin_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmin_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmin_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmin_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmin_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmin_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmin_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmin_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmin_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmin_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmin_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmin_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmin_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmin_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmin_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmin_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmin_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmin_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmin_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmin_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmin_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmin_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmin_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmin_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmin_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmin_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmin_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmin_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
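+// The vminu_* tests below exercise the unsigned variant of the same pattern:
+// the scalar operand becomes uintN_t and the generated IR calls
+// @llvm.riscv.vminu.mask.* instead of @llvm.riscv.vmin.mask.*.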
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vminu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vminu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vminu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vminu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vminu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vminu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vminu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vminu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vminu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vminu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vminu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vminu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vminu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vminu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vminu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vminu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vminu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vminu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vminu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vminu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vminu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vminu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vminu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vminu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vminu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vminu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vminu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vminu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vminu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vminu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vminu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vminu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
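The masked tests above all share one calling convention: the mask selects which lanes are computed, and lanes whose mask bit is clear are taken from the maskedoff operand. A minimal usage sketch of that convention, reusing the vmin_vx_i32m4_m intrinsic exercised above (the helper name, the buffers, and the vsetvl/load/store/compare intrinsics are illustrative assumptions, not part of this commit):

#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>

// Clamp a[i] to at most `limit`, but only where b[i] is nonzero; lanes
// whose mask bit is clear keep the value supplied as maskedoff (here the
// original a[i]). Illustrative sketch: intrinsics other than
// vmin_vx_i32m4_m are assumed from the same intrinsics spec.
static void clamp_where_nonzero(int32_t *a, const int32_t *b, int32_t limit,
                                size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m4(n - i);            // lanes this iteration
    vint32m4_t va = vle32_v_i32m4(a + i, vl);   // load a
    vint32m4_t vb = vle32_v_i32m4(b + i, vl);   // load b
    vbool8_t m = vmsne_vx_i32m4_b8(vb, 0, vl);  // m = (b != 0)
    // Masked vmin.vx: min(va, limit) where m is set, else keep va.
    vint32m4_t vr = vmin_vx_i32m4_m(m, va, va, limit, vl);
    vse32_v_i32m4(a + i, vr, vl);               // store result
    i += vl;
  }
}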
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c
new file mode 100644
index 000000000000..165bd1a12c1d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c
@@ -0,0 +1,6168 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
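+// Naming key for the tests below: _vv_ variants take two vector operands,
+// _vx_ variants take a vector and a scalar; the i8mf8..i64m8 suffix encodes
+// element type and register-group size (LMUL), and the trailing vl argument
+// sets the active vector length.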
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmul_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmul_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmul_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmul_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmul_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmul_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmul_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmul_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmul_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmul_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmul_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmul_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmul_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmul_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmul_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmul_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmul_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmul_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmul_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmul_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmul_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmul_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmul_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmul_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmul_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmul_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmul_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmul_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmul_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmul_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmul_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmul_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmul_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmul_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmul_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmul_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmul_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmul_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmul_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmul_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmul_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmul_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmul_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmul_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmul_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmul_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmul_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmul_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmul_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmul_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmul_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmul_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmul_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmul_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmul_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmul_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmul_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmul_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmul_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmul_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmul_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmul_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmulh_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmulh_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmulh_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmulh_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmulh_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmulh_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmulh_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmulh_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmulh_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmulh_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmulh_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmulh_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmulh_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmulh_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmulh_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmulh_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmulh_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmulh_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmulh_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmulh_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmulh_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmulh_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmulh_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmulh_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmulh_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmulh_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmulh_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmulh_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmulh_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmulh_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmulh_vx_i64m8(op1, op2, vl);
+}
+
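The vmulhu tests below are the unsigned counterpart: same shape, but both operands and the product are unsigned, so the upper half comes from a plain logical shift. A comparable sketch (mulhu_u8 is a hypothetical helper, not part of this patch):

    #include <stdint.h>
    // Hypothetical scalar model of vmulhu for SEW=8:
    // zero-extend, multiply, keep the upper half of the 16-bit product.
    static inline uint8_t mulhu_u8(uint8_t a, uint8_t b) {
      return (uint8_t)(((uint16_t)a * (uint16_t)b) >> 8);
    }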
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmulhu_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmulhu_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmulhu_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmulhu_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmulhu_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmulhu_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmulhu_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmulhu_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmulhu_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmulhu_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmulhu_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmulhu_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmulhu_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmulhu_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmulhu_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmulhu_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmulhu_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmulhu_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmulhu_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmulhu_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmulhu_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmulhu_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmulhu_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmulhu_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmulhu_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmulhu_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmulhu_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmulhu_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmulhu_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmulhu_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmulhu_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmulhsu_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmulhsu_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmulhsu_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmulhsu_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmulhsu_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmulhsu_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmulhsu_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmulhsu_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmulhsu_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmulhsu_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmulhsu_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmulhsu_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmulhsu_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmulhsu_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmulhsu_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmulhsu_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmulhsu_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmulhsu_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmulhsu_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmulhsu_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmulhsu_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmulhsu_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmul_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmul_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmul_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmul_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmul_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmul_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmul_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmul_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmul_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmul_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmul_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmul_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmul_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
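+// Note that the mask type tracks the SEW/LMUL ratio rather than the data
+// type: i32mf2 below has ratio 32/(1/2) = 64, so its mask is vbool64_t,
+// i.e. <vscale x 1 x i1> in the generated IR.
+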
+// CHECK-RV32-LABEL: @test_vmul_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmul_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmul_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmul_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmul_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmul_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmul_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmul_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmul_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmul_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmul_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmul_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmul_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmul_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmul_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmul_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmul_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmul_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmul_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
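+// The unsigned vmul tests below select the same @llvm.riscv.vmul.mask
+// intrinsics as the signed tests above: the low SEW bits of a product are
+// independent of signedness, so only the C-level element types change.
+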
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmul_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmul_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmul_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmul_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmul_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmul_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmul_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmul_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmul_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmul_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmul_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmul_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmul_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmul_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmul_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmul_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmul_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmul_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmul_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmul_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmul_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmul_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmul_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmul_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmul_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmul_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmul_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmul_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmul_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmul_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmul_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmulh_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmulh_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmulh_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmulh_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmulh_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmulh_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmulh_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmulh_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmulh_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmulh_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmulh_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmulh_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmulh_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmulh_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmulh_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmulh_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmulh_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmulh_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmulh_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmulh_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmulh_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmulh_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmulh_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmulh_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmulh_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmulh_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmulh_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmulh_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmulh_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmulh_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmulh_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmulhu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmulhu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmulhu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmulhu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmulhu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmulhu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmulhu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmulhu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmulhu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmulhu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmulhu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmulhu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmulhu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmulhu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmulhu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmulhu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmulhu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmulhu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmulhu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmulhu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmulhu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmulhu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmulhu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmulhu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmulhu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmulhu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmulhu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmulhu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmulhu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmulhu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmulhu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
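
For reference, a minimal scalar sketch of the semantics the masked vmulhu checks above encode, assuming SEW=8; the _vv variants take a vector op2 while the _vx variants take a scalar (visible as the i8/i16/i32/i64 OP2 argument of the IR intrinsic). The helper name ref_vmulhu_e8 is illustrative and not part of this patch:

// Hypothetical scalar model of one vmulhu element at SEW=8: the upper
// half of the widened unsigned product.
static inline uint8_t ref_vmulhu_e8(uint8_t op1, uint8_t op2) {
  uint16_t prod = (uint16_t)op1 * (uint16_t)op2;   // widen, then multiply
  return (uint8_t)(prod >> 8);                     // keep the high SEW bits
}
// Masked form sketch: for i < vl, result[i] is ref_vmulhu_e8(op1[i], op2[i])
// when mask[i] is set, and maskedoff[i] otherwise, per the merge semantics
// suggested by the maskedoff operand in the checks above.
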
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmulhsu_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmulhsu_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmulhsu_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmulhsu_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmulhsu_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmulhsu_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmulhsu_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
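
Similarly, a minimal scalar sketch of the signed-by-unsigned vmulhsu checks above, assuming SEW=8 and an arithmetic right shift; ref_vmulhsu_e8 is an illustrative name, not from this patch:

// Hypothetical scalar model of one vmulhsu element at SEW=8: sign-extend
// op1, zero-extend op2, multiply, and keep the high SEW bits.
static inline int8_t ref_vmulhsu_e8(int8_t op1, uint8_t op2) {
  int16_t prod = (int16_t)((int)op1 * (int)op2);   // signed * unsigned product fits in 16 bits
  return (int8_t)(prod >> 8);                      // arithmetic shift assumed
}
// The masked variants merge exactly as in the vmulhu sketch: inactive
// elements take their value from maskedoff.
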
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmulhsu_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmulhsu_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmulhsu_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmulhsu_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmulhsu_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmulhsu_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmulhsu_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmulhsu_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmulhsu_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmulhsu_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmulhsu_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmulhsu_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmulhsu_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmulhsu_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmulhsu_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
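+
+// Note on the masked (_m) forms above: per the RVV spec, vmulhsu returns the
+// high SEW bits of the product of signed op1 and unsigned op2; in the _m
+// forms, only elements with a set mask bit are computed, and the remaining
+// elements are taken from maskedoff. A scalar model of one element, assuming
+// SEW=32 (the widened product of int32 x uint32 fits in int64):
+//   int64_t p = (int64_t)op1[i] * (int64_t)op2[i];   // op2 zero-extends
+//   result[i] = mask[i] ? (int32_t)((uint64_t)p >> 32) : maskedoff[i];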
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c
new file mode 100644
index 000000000000..f3f2ee6a9905
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
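+
+// vnmsac performs a negated multiply-subtract into the accumulator; per the
+// RVV spec, each active element computes:
+//   acc[i] = -(op1[i] * op2[i]) + acc[i]
+// The _vv forms take a vector multiplier, the _vx forms a scalar one, and the
+// trailing size_t argument is the active vector length (vl). The arithmetic
+// wraps modulo 2^SEW, so the same operation covers signed and unsigned types.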
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsac_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsac_vv_i8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsac_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsac_vx_i8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsac_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsac_vv_i8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsac_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsac_vx_i8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsac_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsac_vv_i8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsac_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsac_vx_i8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsac_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsac_vv_i8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsac_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsac_vx_i8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsac_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsac_vv_i8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsac_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsac_vx_i8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsac_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsac_vv_i8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsac_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsac_vx_i8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsac_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsac_vv_i8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsac_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsac_vx_i8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsac_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsac_vv_i16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsac_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsac_vx_i16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsac_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsac_vv_i16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsac_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsac_vx_i16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsac_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsac_vv_i16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsac_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsac_vx_i16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsac_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsac_vv_i16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsac_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsac_vx_i16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsac_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsac_vv_i16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsac_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsac_vx_i16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsac_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsac_vv_i16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsac_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsac_vx_i16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsac_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsac_vv_i32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsac_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsac_vx_i32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsac_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsac_vv_i32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsac_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsac_vx_i32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsac_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsac_vv_i32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsac_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsac_vx_i32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsac_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsac_vv_i32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsac_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsac_vx_i32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsac_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsac_vv_i32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsac_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsac_vx_i32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsac_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsac_vv_i64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsac_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsac_vx_i64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsac_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsac_vv_i64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsac_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsac_vx_i64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsac_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsac_vv_i64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsac_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsac_vx_i64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsac_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsac_vv_i64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsac_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsac_vx_i64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsac_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsac_vv_u8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsac_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsac_vx_u8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsac_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsac_vv_u8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsac_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsac_vx_u8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsac_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsac_vv_u8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsac_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsac_vx_u8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsac_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsac_vv_u8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsac_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsac_vx_u8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsac_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsac_vv_u8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsac_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsac_vx_u8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsac_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsac_vv_u8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsac_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsac_vx_u8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsac_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsac_vv_u8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsac_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsac_vx_u8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsac_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsac_vv_u16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsac_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsac_vx_u16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsac_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsac_vv_u16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsac_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsac_vx_u16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsac_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsac_vv_u16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsac_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsac_vx_u16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsac_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsac_vv_u16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsac_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsac_vx_u16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsac_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsac_vv_u16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsac_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsac_vx_u16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsac_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsac_vv_u16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsac_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsac_vx_u16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsac_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsac_vv_u32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsac_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsac_vx_u32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsac_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsac_vv_u32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsac_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsac_vx_u32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsac_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsac_vv_u32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsac_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsac_vx_u32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsac_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsac_vv_u32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsac_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsac_vx_u32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsac_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsac_vv_u32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsac_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsac_vx_u32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsac_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsac_vv_u64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsac_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsac_vx_u64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsac_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsac_vv_u64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsac_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsac_vx_u64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsac_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsac_vv_u64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsac_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsac_vx_u64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsac_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsac_vv_u64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsac_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsac_vx_u64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsac_vv_i8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsac_vx_i8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsac_vv_i8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsac_vx_i8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsac_vv_i8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsac_vx_i8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsac_vv_i8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsac_vx_i8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsac_vv_i8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsac_vx_i8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsac_vv_i8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsac_vx_i8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsac_vv_i8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsac_vx_i8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsac_vv_i16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsac_vx_i16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsac_vv_i16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsac_vx_i16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsac_vv_i16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsac_vx_i16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsac_vv_i16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsac_vx_i16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsac_vv_i16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsac_vx_i16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsac_vv_i16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsac_vx_i16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsac_vv_i32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsac_vx_i32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsac_vv_i32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsac_vx_i32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsac_vv_i32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsac_vx_i32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsac_vv_i32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsac_vx_i32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsac_vv_i32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsac_vx_i32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsac_vv_i64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsac_vx_i64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsac_vv_i64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsac_vx_i64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsac_vv_i64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsac_vx_i64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsac_vv_i64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsac_vx_i64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsac_vv_u8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsac_vx_u8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsac_vv_u8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsac_vx_u8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsac_vv_u8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsac_vx_u8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsac_vv_u8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsac_vx_u8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsac_vv_u8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsac_vx_u8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsac_vv_u8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsac_vx_u8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsac_vv_u8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsac_vx_u8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsac_vv_u16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsac_vx_u16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsac_vv_u16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsac_vx_u16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsac_vv_u16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsac_vx_u16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsac_vv_u16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsac_vx_u16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsac_vv_u16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsac_vx_u16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsac_vv_u16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsac_vx_u16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsac_vv_u32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsac_vx_u32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsac_vv_u32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsac_vx_u32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsac_vv_u32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsac_vx_u32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsac_vv_u32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsac_vx_u32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsac_vv_u32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsac_vx_u32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsac_vv_u64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsac_vx_u64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsac_vv_u64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsac_vx_u64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsac_vv_u64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsac_vx_u64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsac_vv_u64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsac_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsac_vx_u64m8_m(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c
new file mode 100644
index 000000000000..4cc83f48c1a1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsub_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsub_vv_i8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsub_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsub_vx_i8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsub_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsub_vv_i8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsub_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsub_vx_i8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsub_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsub_vv_i8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsub_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsub_vx_i8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsub_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsub_vv_i8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsub_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsub_vx_i8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsub_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsub_vv_i8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsub_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsub_vx_i8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsub_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsub_vv_i8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsub_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsub_vx_i8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsub_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsub_vv_i8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsub_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsub_vx_i8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsub_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsub_vv_i16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsub_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsub_vx_i16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsub_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsub_vv_i16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsub_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsub_vx_i16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsub_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsub_vv_i16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsub_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsub_vx_i16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsub_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsub_vv_i16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsub_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsub_vx_i16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsub_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsub_vv_i16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsub_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsub_vx_i16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsub_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsub_vv_i16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsub_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsub_vx_i16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsub_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsub_vv_i32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsub_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsub_vx_i32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsub_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsub_vv_i32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsub_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsub_vx_i32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsub_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsub_vv_i32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsub_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsub_vx_i32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsub_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsub_vv_i32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsub_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsub_vx_i32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsub_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsub_vv_i32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsub_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsub_vx_i32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsub_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsub_vv_i64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsub_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsub_vx_i64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsub_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsub_vv_i64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsub_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsub_vx_i64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsub_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsub_vv_i64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsub_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsub_vx_i64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsub_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsub_vv_i64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsub_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsub_vx_i64m8(acc, op1, op2, vl);
+}
+
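// For reference: every vnmsub test in this file exercises the same
// recurrence. vnmsub treats the destination (accumulator) register as the
// multiplicand and overwrites it, computing vd[i] = -(vd[i] * op1) + op2[i]
// for the first vl elements. Below is a minimal scalar model of that
// semantic, using a hypothetical helper name vnmsub_vx_i8m1_ref; it is an
// illustrative sketch only, not part of the generated test file or of this
// patch.

#include <stddef.h>
#include <stdint.h>

// Hypothetical scalar reference for the vnmsub_vx_i8m1 intrinsic: for each
// of the vl elements, the accumulator element (the multiplicand) is
// overwritten with -(acc[i] * op1) + op2[i]; the cast back to int8_t models
// the modulo-2^8 wrap of 8-bit element arithmetic.
static void vnmsub_vx_i8m1_ref(int8_t *acc, int8_t op1, const int8_t *op2,
                               size_t vl) {
  for (size_t i = 0; i < vl; ++i)
    acc[i] = (int8_t)(-(acc[i] * op1) + op2[i]);
}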
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsub_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsub_vv_u8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsub_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsub_vx_u8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsub_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsub_vv_u8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsub_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsub_vx_u8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsub_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsub_vv_u8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsub_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsub_vx_u8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsub_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsub_vv_u8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsub_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsub_vx_u8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsub_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsub_vv_u8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsub_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsub_vx_u8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsub_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsub_vv_u8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsub_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsub_vx_u8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsub_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsub_vv_u8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsub_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsub_vx_u8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsub_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsub_vv_u16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsub_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsub_vx_u16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsub_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsub_vv_u16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsub_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsub_vx_u16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsub_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsub_vv_u16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsub_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsub_vx_u16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsub_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsub_vv_u16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsub_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsub_vx_u16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsub_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsub_vv_u16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsub_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsub_vx_u16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsub_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsub_vv_u16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsub_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsub_vx_u16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsub_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsub_vv_u32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsub_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsub_vx_u32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsub_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsub_vv_u32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsub_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsub_vx_u32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsub_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsub_vv_u32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsub_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsub_vx_u32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsub_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsub_vv_u32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsub_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsub_vx_u32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsub_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsub_vv_u32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsub_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsub_vx_u32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsub_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsub_vv_u64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsub_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsub_vx_u64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsub_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsub_vv_u64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsub_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsub_vx_u64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsub_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsub_vv_u64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsub_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsub_vx_u64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsub_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsub_vv_u64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsub_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsub_vx_u64m8(acc, op1, op2, vl);
+}
+
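[Aside, not part of the diff: the unmasked tests above all exercise the same operation. Per the RISC-V V spec, vnmsub overwrites its accumulator: vd[i] = -(vd[i] * vs1[i]) + vs2[i]. A minimal hand-written scalar sketch of the vv and vx forms, for illustration only; the _model names are hypothetical, not part of the intrinsics API:

#include <stddef.h>
#include <stdint.h>

/* Scalar model of vnmsub_vv_u32*: acc[i] = -(acc[i] * op1[i]) + op2[i],
 * computed modulo 2^32, over the first vl elements. */
static void vnmsub_vv_u32_model(uint32_t *acc, const uint32_t *op1,
                                const uint32_t *op2, size_t vl) {
  for (size_t i = 0; i < vl; ++i)
    acc[i] = op2[i] - acc[i] * op1[i];
}

/* The vx form splats one scalar as op1; everything else is identical,
 * which is why its CHECK lines differ only in the i32 scalar operand. */
static void vnmsub_vx_u32_model(uint32_t *acc, uint32_t op1,
                                const uint32_t *op2, size_t vl) {
  for (size_t i = 0; i < vl; ++i)
    acc[i] = op2[i] - acc[i] * op1;
}

The masked variants below follow; note the extra vboolN_t argument, which lowers to the <vscale x N x i1> operand of the .mask intrinsic.]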
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsub_vv_i8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsub_vx_i8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsub_vv_i8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsub_vx_i8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsub_vv_i8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsub_vx_i8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsub_vv_i8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsub_vx_i8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsub_vv_i8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsub_vx_i8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsub_vv_i8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsub_vx_i8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsub_vv_i8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsub_vx_i8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsub_vv_i16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsub_vx_i16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsub_vv_i16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsub_vx_i16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsub_vv_i16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsub_vx_i16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsub_vv_i16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsub_vx_i16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsub_vv_i16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsub_vx_i16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsub_vv_i16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsub_vx_i16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsub_vv_i32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsub_vx_i32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsub_vv_i32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsub_vx_i32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsub_vv_i32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsub_vx_i32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsub_vv_i32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsub_vx_i32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsub_vv_i32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsub_vx_i32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsub_vv_i64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsub_vx_i64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsub_vv_i64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsub_vx_i64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsub_vv_i64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsub_vx_i64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsub_vv_i64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsub_vx_i64m8_m(mask, acc, op1, op2, vl);
+}
+
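[Aside, not part of the diff: a matching sketch of the masked form, assuming inactive elements keep the accumulator value (the merge role acc appears to play as the first operand of the .mask intrinsic above). Hand-written illustration with a hypothetical name, not generated test code:

#include <stddef.h>
#include <stdint.h>

/* Masked model: update only where mask[i] is set; elsewhere acc[i] is
 * left undisturbed. Arithmetic is done in uint64_t to keep the wrapping
 * well defined, then stored back as int64_t. */
static void vnmsub_vv_i64_m_model(const _Bool *mask, int64_t *acc,
                                  const int64_t *op1, const int64_t *op2,
                                  size_t vl) {
  for (size_t i = 0; i < vl; ++i)
    if (mask[i])
      acc[i] = (int64_t)((uint64_t)op2[i] -
                         (uint64_t)acc[i] * (uint64_t)op1[i]);
}

The unsigned masked tests that follow are element-for-element the same operation; only the C-level types change.]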
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsub_vv_u8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsub_vx_u8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsub_vv_u8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsub_vx_u8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsub_vv_u8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsub_vx_u8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsub_vv_u8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsub_vx_u8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsub_vv_u8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsub_vx_u8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsub_vv_u8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsub_vx_u8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsub_vv_u8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsub_vx_u8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsub_vv_u16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsub_vx_u16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsub_vv_u16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsub_vx_u16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsub_vv_u16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsub_vx_u16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsub_vv_u16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsub_vx_u16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsub_vv_u16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsub_vx_u16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsub_vv_u16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsub_vx_u16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsub_vv_u32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsub_vx_u32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsub_vv_u32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsub_vx_u32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsub_vv_u32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsub_vx_u32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsub_vv_u32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsub_vx_u32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsub_vv_u32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsub_vx_u32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsub_vv_u64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsub_vx_u64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsub_vv_u64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsub_vx_u64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsub_vv_u64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsub_vx_u64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsub_vv_u64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsub_vx_u64m8_m(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c
new file mode 100644
index 000000000000..544b52467304
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vor_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vor_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vor_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vor_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vor_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vor_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vor_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vor_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vor_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vor_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vor_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vor_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vor_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vor_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vor_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vor_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vor_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vor_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vor_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vor_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vor_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vor_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vor_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vor_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vor_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vor_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vor_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vor_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vor_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vor_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vor_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vor_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vor_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vor_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vor_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vor_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vor_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vor_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vor_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vor_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vor_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vor_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vor_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vor_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vor_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vor_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vor_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vor_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vor_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vor_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vor_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vor_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vor_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vor_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vor_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vor_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vor_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vor_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vor_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vor_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vor_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vor_vx_u64m8(op1, op2, vl);
+}
+
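[Editor's note, not part of the diff: the generated checks above exercise the unmasked vor_vv/vor_vx builtins, and the ones below exercise the masked `_m` forms. A minimal usage sketch of both call shapes follows, for readers skimming the tests. It is illustrative only; vsetvl_e32m1, vle32_v_i32m1, vse32_v_i32m1, and vmslt_vx_i32m1_b32 are names from the RVV intrinsics spec of this era and are assumed available (some land in related patches of this series), while the vor_* signatures match the tests in this diff.]

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Strip-mined loop: data[i] |= bits for all i, using the unmasked vor.vx
   form tested above. vsetvl_e32m1 returns how many elements this pass
   handles, so the tail needs no scalar epilogue. */
void or_in_place(int32_t *data, size_t n, int32_t bits) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m1(n - i);            /* elements this iteration */
    vint32m1_t v = vle32_v_i32m1(&data[i], vl); /* unit-stride load */
    v = vor_vx_i32m1(v, bits, vl);              /* vor.vx: vector |= scalar */
    vse32_v_i32m1(&data[i], v, vl);             /* unit-stride store */
    i += vl;
  }
}

/* Masked form, matching the _m tests below: OR in bits only where
   data[i] < 0. Inactive lanes take the maskedoff operand (here v itself),
   so they keep their old values. vmslt_vx_i32m1_b32 is assumed from the
   intrinsics spec to build the mask. */
void or_negative_only(int32_t *data, size_t n, int32_t bits) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m1(n - i);
    vint32m1_t v = vle32_v_i32m1(&data[i], vl);
    vbool32_t neg = vmslt_vx_i32m1_b32(v, 0, vl); /* mask: v < 0 */
    v = vor_vx_i32m1_m(neg, v, v, bits, vl);      /* masked vor.vx */
    vse32_v_i32m1(&data[i], v, vl);
    i += vl;
  }
}

[The maskedoff argument is what the masked builtins thread through as the first IR operand of the llvm.riscv.vor.mask.* calls checked below; the mask itself is passed last before vl.]
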
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vor_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vor_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vor_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vor_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vor_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vor_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vor_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vor_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vor_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vor_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vor_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vor_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vor_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vor_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vor_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vor_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vor_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vor_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vor_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vor_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vor_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vor_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vor_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vor_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vor_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vor_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vor_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vor_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vor_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vor_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vor_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vor_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vor_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vor_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vor_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vor_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vor_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vor_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vor_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vor_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vor_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vor_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vor_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vor_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vor_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vor_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vor_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vor_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vor_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vor_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vor_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vor_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vor_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vor_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vor_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vor_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vor_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vor_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vor_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vor_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vor_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vor_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c
new file mode 100644
index 000000000000..06e0e8a2749f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vrem_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vrem_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vrem_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vrem_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vrem_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vrem_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vrem_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vrem_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vrem_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vrem_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vrem_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vrem_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vrem_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vrem_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vrem_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vrem_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vrem_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vrem_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vrem_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vrem_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vrem_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vrem_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vrem_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vrem_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vrem_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vrem_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vrem_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vrem_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vrem_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vrem_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vrem_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vremu_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vremu_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vremu_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vremu_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vremu_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vremu_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vremu_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vremu_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vremu_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vremu_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vremu_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vremu_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vremu_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vremu_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vremu_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vremu_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vremu_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vremu_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vremu_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vremu_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vremu_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vremu_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vremu_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vremu_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vremu_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vremu_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vremu_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vremu_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vremu_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vremu_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vremu_vx_u64m8(op1, op2, vl);
+}
+
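+// The _m test variants below exercise the masked forms of the same builtins.
+// Relative to the unmasked tests above, each masked builtin takes two extra
+// leading arguments: a mask (vboolN_t, where N = SEW/LMUL of the vector
+// operand type) selecting the active elements, and a maskedoff vector that
+// supplies the result for inactive elements. The CHECK lines accordingly
+// expect the corresponding @llvm.riscv.<op>.mask.* intrinsics. As a sketch of
+// the intended element-wise semantics (not CHECK output), for vrem_vv:
+//
+//   result[i] = mask[i] ? (op1[i] % op2[i]) : maskedoff[i]   for 0 <= i < vl
+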
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vrem_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vrem_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vrem_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrem_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vrem_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrem_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrem_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vrem_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrem_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrem_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vrem_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrem_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrem_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vrem_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrem_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vrem_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vrem_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrem_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vrem_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrem_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrem_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vrem_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrem_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrem_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vrem_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrem_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrem_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vrem_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrem_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vrem_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vrem_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrem_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vrem_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrem_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vrem_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrem_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vrem_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrem_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vrem_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrem_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vrem_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrem_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vrem_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrem_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vrem_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrem_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vrem_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrem_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vrem_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrem_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vrem_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrem_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vrem_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrem_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vrem_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrem_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vrem_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrem_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vrem_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrem_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vrem_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrem_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vrem_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
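+// The vremu_* masked tests below mirror the vrem_* masked tests above, using
+// unsigned element types and the @llvm.riscv.vremu.mask.* intrinsics
+// (unsigned remainder) in place of the signed vrem forms.
+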
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vremu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vremu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vremu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vremu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vremu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vremu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vremu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vremu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vremu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vremu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vremu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vremu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vremu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vremu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vremu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vremu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vremu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vremu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vremu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vremu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vremu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vremu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vremu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vremu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vremu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vremu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vremu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vremu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vremu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vremu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vremu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vremu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vremu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vremu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
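A minimal usage sketch (not part of this commit) of the masked form exercised above. Per the CHECK lines, the intrinsic lowers to llvm.riscv.vremu.mask with maskedoff as the first operand, so active lanes (mask bit set) receive op1 % op2 while inactive lanes are taken from maskedoff. The helper name below is hypothetical; it only calls the vremu_vx_u32m1_m intrinsic tested here.

#include <riscv_vector.h>

// Hypothetical helper: unsigned remainder by a scalar divisor. Lanes where
// `mask` is clear keep the corresponding lane of `maskedoff`, matching the
// merge semantics the tests above check for vremu_vx_u32m1_m.
vuint32m1_t remainder_by_scalar_masked(vbool32_t mask, vuint32m1_t maskedoff,
                                       vuint32m1_t values, uint32_t divisor,
                                       size_t vl) {
  return vremu_vx_u32m1_m(mask, maskedoff, values, divisor, vl);
}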
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
new file mode 100644
index 000000000000..51f905001e57
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
@@ -0,0 +1,1240 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
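+// vrsub ("reverse subtract") swaps the operand order of vsub: for the .vx
+// form each result element is the scalar op2 minus the vector element
+// op1[i]. vrsub has no vector-vector form, which is why only _vx variants
+// appear below, covering every element width and LMUL combination.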
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vrsub_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vrsub_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vrsub_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vrsub_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vrsub_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vrsub_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vrsub_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vrsub_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vrsub_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vrsub_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vrsub_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vrsub_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vrsub_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vrsub_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vrsub_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vrsub_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vrsub_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vrsub_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vrsub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vrsub_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vrsub_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vrsub_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vrsub_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vrsub_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vrsub_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vrsub_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vrsub_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vrsub_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vrsub_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vrsub_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vrsub_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vrsub_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vrsub_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vrsub_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vrsub_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vrsub_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c
new file mode 100644
index 000000000000..be5add398e65
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsll_vv_i8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i32.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsll_vv_i8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i32.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsll_vv_i8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i32.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsll_vv_i8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i32.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsll_vv_i8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i32.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsll_vv_i8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i32.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsll_vv_i8m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i32.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsll_vv_i16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsll_vv_i16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsll_vv_i16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsll_vv_i16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsll_vv_i16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsll_vv_i16m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsll_vv_i32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsll_vv_i32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsll_vv_i32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsll_vv_i32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsll_vv_i32m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i32m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsll_vv_i64m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i64m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsll_vv_i64m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i64m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsll_vv_i64m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i64m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsll_vv_i64m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i64m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsll_vv_u8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i32.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsll_vv_u8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i32.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsll_vv_u8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i32.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsll_vv_u8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i32.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsll_vv_u8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i32.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsll_vv_u8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i32.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsll_vv_u8m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i32.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsll_vv_u16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsll_vv_u16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsll_vv_u16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsll_vv_u16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsll_vv_u16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsll_vv_u16m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsll_vv_u32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsll_vv_u32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsll_vv_u32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsll_vv_u32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsll_vv_u32m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u32m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsll_vv_u64m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u64m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsll_vv_u64m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u64m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsll_vv_u64m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u64m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsll_vv_u64m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u64m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsll_vv_i8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsll_vv_i8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsll_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsll_vv_i8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsll_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsll_vv_i8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsll_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsll_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsll_vv_i8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsll_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsll_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsll_vv_i8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsll_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsll_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsll_vv_i8m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i32.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsll_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsll_vv_i16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsll_vv_i16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsll_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsll_vv_i16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsll_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsll_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsll_vv_i16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsll_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsll_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsll_vv_i16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsll_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsll_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsll_vv_i16m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i32.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsll_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsll_vv_i32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsll_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsll_vv_i32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsll_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsll_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsll_vv_i32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsll_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsll_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsll_vv_i32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsll_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsll_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsll_vv_i32m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsll_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i32m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsll_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsll_vv_i64m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsll_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i64m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsll_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsll_vv_i64m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsll_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i64m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsll_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsll_vv_i64m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsll_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i64m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsll_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsll_vv_i64m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsll_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i64m8_m(mask, maskedoff, op1, shift, vl);
+}
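// ---------------------------------------------------------------------------
// The unsigned masked tests below lower to the same signless IR intrinsics as
// the signed tests above; only the C-level types differ. Note how the mask
// type is paired by the SEW/LMUL ratio: vboolN_t goes with any vector type
// whose SEW/LMUL equals N. For example, u8mf8 gives 8 / (1/8) = 64, hence
// vbool64_t, while u16m2 gives 16 / 2 = 8, hence vbool8_t.
// ---------------------------------------------------------------------------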
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsll_vv_u8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsll_vv_u8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsll_vv_u8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsll_vv_u8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsll_vv_u8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsll_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsll_vv_u8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsll_vv_u8m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i32.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsll_vv_u16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsll_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsll_vv_u16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsll_vv_u16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsll_vv_u16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsll_vv_u16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsll_vv_u16m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i32.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsll_vv_u32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsll_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsll_vv_u32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsll_vv_u32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsll_vv_u32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsll_vv_u32m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u32m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsll_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsll_vv_u64m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u64m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsll_vv_u64m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u64m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsll_vv_u64m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u64m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsll_vv_u64m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u64m8_m(mask, maskedoff, op1, shift, vl);
+}
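
(For context before the next file: the tests above exercise the masked vector-scalar form vsll_vx_*_m. A minimal strip-mined usage sketch of the unmasked vsll_vx form this patch also adds follows; it assumes the same pre-ratified v0.10 naming for vsetvl_e16m1/vle16_v_u16m1/vse16_v_u16m1 from earlier patches in this series, and the helper name shl_u16 is illustrative only, not part of this patch.)

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Illustrative helper (not in this patch): dst[i] = src[i] << amt for n
// elements, strip-mined on whatever vector length the hardware grants.
void shl_u16(uint16_t *dst, const uint16_t *src, size_t amt, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e16m1(n - i);             // active elements this pass
    vuint16m1_t v = vle16_v_u16m1(src + i, vl);  // unit-stride load
    v = vsll_vx_u16m1(v, amt, vl);               // vector-scalar shift left
    vse16_v_u16m1(dst + i, v, vl);               // store shifted lanes
    i += vl;                                     // advance by elements done
  }
}

(The masked _m variants tested above behave the same, except that lanes whose mask bit is 0 take their value from maskedoff instead of being computed.)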
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c
new file mode 100644
index 000000000000..4ad0bcae1e2b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c
@@ -0,0 +1,1240 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsra_vv_i8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i32.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsra_vv_i8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i32.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsra_vv_i8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i32.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsra_vv_i8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i32.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsra_vv_i8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i32.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsra_vv_i8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i32.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsra_vv_i8m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i32.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsra_vv_i16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsra_vv_i16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsra_vv_i16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsra_vv_i16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsra_vv_i16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsra_vv_i16m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsra_vv_i32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsra_vv_i32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsra_vv_i32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsra_vv_i32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsra_vv_i32m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i32m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsra_vv_i64m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i64m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsra_vv_i64m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i64m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsra_vv_i64m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i64m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsra_vv_i64m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i64m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsra_vv_i8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsra_vv_i8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsra_vv_i8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsra_vv_i8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsra_vv_i8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsra_vv_i8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsra_vv_i8m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i32.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsra_vv_i16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsra_vv_i16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsra_vv_i16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsra_vv_i16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsra_vv_i16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsra_vv_i16m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i32.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsra_vv_i32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsra_vv_i32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsra_vv_i32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsra_vv_i32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsra_vv_i32m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i32m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsra_vv_i64m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i64m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsra_vv_i64m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i64m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsra_vv_i64m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i64m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsra_vv_i64m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i64m8_m(mask, maskedoff, op1, shift, vl);
+}
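
For readers skimming the masked checks above: every `_m` test shares one calling convention (mask, maskedoff, op1, shift, vl), so a minimal caller sketch makes it concrete. This sketch is illustrative only and not part of the patch; `shift_active_lanes` is a hypothetical name, while the intrinsic, types, and argument order are taken verbatim from test_vsra_vx_i32m1_m above.

#include <riscv_vector.h>

// Minimal sketch: arithmetic-shift only the lanes where `mask` is set;
// lanes with a clear mask bit take their value from `maskedoff`.
// (Hypothetical helper; mirrors the v0.10-era intrinsic used by the tests.)
vint32m1_t shift_active_lanes(vbool32_t mask, vint32m1_t maskedoff,
                              vint32m1_t op1, size_t shift, size_t vl) {
  return vsra_vx_i32m1_m(mask, maskedoff, op1, shift, vl);
}

As the CHECK lines above show, the scalar `shift` and `vl` operands are passed at XLEN width (i32 on RV32, i64 on RV64), which is why each test carries separate CHECK-RV32 and CHECK-RV64 prefixes.
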
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
new file mode 100644
index 000000000000..9c5df425af96
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
@@ -0,0 +1,1241 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsrl_vv_u8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i32.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsrl_vv_u8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i32.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsrl_vv_u8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i32.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsrl_vv_u8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i32.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsrl_vv_u8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i32.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsrl_vv_u8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i32.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsrl_vv_u8m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i32.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsrl_vv_u16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsrl_vv_u16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsrl_vv_u16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsrl_vv_u16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsrl_vv_u16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsrl_vv_u16m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsrl_vv_u32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsrl_vv_u32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsrl_vv_u32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsrl_vv_u32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsrl_vv_u32m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u32m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsrl_vv_u64m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u64m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsrl_vv_u64m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u64m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsrl_vv_u64m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u64m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsrl_vv_u64m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u64m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsrl_vv_u8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsrl_vv_u8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsrl_vv_u8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsrl_vv_u8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsrl_vv_u8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsrl_vv_u8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsrl_vv_u8m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i32.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsrl_vv_u16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsrl_vv_u16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsrl_vv_u16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsrl_vv_u16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsrl_vv_u16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsrl_vv_u16m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i32.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsrl_vv_u32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsrl_vv_u32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsrl_vv_u32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsrl_vv_u32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsrl_vv_u32m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u32m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsrl_vv_u64m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u64m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsrl_vv_u64m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u64m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsrl_vv_u64m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u64m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsrl_vv_u64m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u64m8_m(mask, maskedoff, op1, shift, vl);
+}
+
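For reference, a minimal usage sketch of the vsrl_vx intrinsics exercised above: a strip-mined loop that shifts every element of a buffer right by a scalar amount. This is an illustration only, and it assumes the vsetvl and unit-stride vle32/vse32 intrinsics from the same v0.10 intrinsic naming scheme are available in the build; the vsrl_vx_u32m8 call itself matches the signature of test_vsrl_vx_u32m8 in the tests above, and lsr_u32_buffer is a hypothetical helper name.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Hypothetical helper: logical shift right of each element of src by
// `shift`, strip-mined with vsetvl so any element count n is handled.
void lsr_u32_buffer(uint32_t *dst, const uint32_t *src, size_t shift,
                    size_t n) {
  while (n > 0) {
    size_t vl = vsetvl_e32m8(n);             // elements handled this pass
    vuint32m8_t v = vle32_v_u32m8(src, vl);  // unit-stride load
    v = vsrl_vx_u32m8(v, shift, vl);         // intrinsic tested above
    vse32_v_u32m8(dst, v, vl);               // unit-stride store
    src += vl;
    dst += vl;
    n -= vl;
  }
}

The masked _m variants tested above follow the same pattern, taking an extra vboolN_t mask and a maskedoff vector that supplies the result for inactive elements.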
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
new file mode 100644
index 000000000000..4ac644ebbbfd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vsub_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vsub_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vsub_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vsub_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vsub_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vsub_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vsub_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vsub_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vsub_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vsub_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vsub_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vsub_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vsub_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vsub_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vsub_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vsub_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vsub_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vsub_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vsub_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vsub_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vsub_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vsub_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vsub_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vsub_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vsub_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vsub_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vsub_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vsub_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vsub_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vsub_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vsub_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vsub_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vsub_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vsub_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vsub_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vsub_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vsub_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vsub_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vsub_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vsub_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vsub_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vsub_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vsub_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vsub_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vsub_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vsub_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vsub_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vsub_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vsub_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vsub_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vsub_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vsub_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vsub_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vsub_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vsub_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vsub_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vsub_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vsub_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vsub_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vsub_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vsub_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vsub_vx_u64m8(op1, op2, vl);
+}
+
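+// The masked variants below take a mask and a maskedoff vector (the value
+// returned for inactive lanes) and, as the CHECK lines show, lower to the
+// corresponding @llvm.riscv.vsub.mask.* intrinsics.
+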
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vsub_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vsub_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vsub_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vsub_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vsub_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vsub_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vsub_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vsub_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vsub_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vsub_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vsub_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vsub_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vsub_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vsub_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vsub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vsub_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vsub_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vsub_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vsub_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vsub_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vsub_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vsub_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vsub_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vsub_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vsub_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vsub_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vsub_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vsub_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vsub_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vsub_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vsub_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
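+// The unsigned masked variants reuse the same @llvm.riscv.vsub.mask.*
+// intrinsics as the signed ones above: two's-complement subtraction is
+// identical for signed and unsigned operands, so only the C-level types differ.
+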
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vsub_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vsub_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vsub_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vsub_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vsub_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vsub_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vsub_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vsub_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vsub_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vsub_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vsub_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vsub_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vsub_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vsub_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vsub_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vsub_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vsub_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vsub_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vsub_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vsub_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vsub_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vsub_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vsub_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vsub_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vsub_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vsub_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vsub_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vsub_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vsub_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vsub_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vsub_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
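
For context, a minimal usage sketch of the vsub intrinsics exercised above (illustrative only, not part of this patch). The masked `_m` variants take a mask and a `maskedoff` vector in addition to the operands; inactive lanes receive `maskedoff`'s value. The sketch below uses the unmasked `vsub_vv_u8m1` in a strip-mined loop, and assumes the `vsetvl_e8m1`, `vle8_v_u8m1`, and `vse8_v_u8m1` intrinsics from riscv_vector.h are available alongside it:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* dst[i] = a[i] - b[i] for i in [0, n), strip-mined by vsetvl.
   Assumes load/store and vsetvl intrinsics from riscv_vector.h. */
void sub_u8(uint8_t *dst, const uint8_t *a, const uint8_t *b, size_t n) {
  while (n > 0) {
    size_t vl = vsetvl_e8m1(n);               /* elements this iteration */
    vuint8m1_t va = vle8_v_u8m1(a, vl);       /* load vl bytes of a      */
    vuint8m1_t vb = vle8_v_u8m1(b, vl);       /* load vl bytes of b      */
    vuint8m1_t vr = vsub_vv_u8m1(va, vb, vl); /* vr[i] = va[i] - vb[i]   */
    vse8_v_u8m1(dst, vr, vl);                 /* store vl results        */
    a += vl; b += vl; dst += vl; n -= vl;
  }
}
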
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c
new file mode 100644
index 000000000000..2351c55bd7f5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vxor_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vxor_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vxor_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vxor_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vxor_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vxor_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vxor_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vxor_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vxor_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vxor_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vxor_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vxor_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vxor_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vxor_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vxor_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vxor_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vxor_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vxor_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vxor_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vxor_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vxor_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vxor_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vxor_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vxor_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vxor_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vxor_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vxor_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vxor_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vxor_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vxor_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vxor_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vxor_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vxor_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vxor_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vxor_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vxor_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vxor_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vxor_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vxor_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vxor_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vxor_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vxor_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vxor_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vxor_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vxor_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vxor_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vxor_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vxor_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vxor_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vxor_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vxor_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vxor_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vxor_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vxor_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vxor_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vxor_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vxor_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vxor_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vxor_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vxor_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vxor_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vxor_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vxor_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vxor_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vxor_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vxor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vxor_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vxor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vxor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vxor_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vxor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vxor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vxor_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vxor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vxor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vxor_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vxor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vxor_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vxor_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vxor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vxor_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vxor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vxor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vxor_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vxor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vxor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vxor_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vxor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vxor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vxor_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vxor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vxor_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vxor_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vxor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vxor_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vxor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vxor_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vxor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vxor_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vxor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vxor_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vxor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vxor_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vxor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vxor_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vxor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vxor_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vxor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vxor_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vxor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vxor_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vxor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vxor_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vxor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vxor_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vxor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vxor_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vxor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vxor_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vxor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vxor_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vxor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vxor_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vxor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vxor_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vxor_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vxor_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vxor_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vxor_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vxor_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vxor_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vxor_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vxor_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vxor_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vxor_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vxor_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vxor_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vxor_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vxor_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vxor_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vxor_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vxor_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vxor_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vxor_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vxor_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vxor_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vxor_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vxor_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vxor_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vxor_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vxor_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vxor_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vxor_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vxor_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vxor_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vxor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vxor_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}